author     Doug Ledford <dledford@redhat.com>  2017-08-24 20:25:15 -0400
committer  Doug Ledford <dledford@redhat.com>  2017-08-24 20:25:15 -0400
commit     a1139697adac394ce1fb06410b914f070b314f64 (patch)
tree       34beeb4d2030ac0c6accadedadde2b8b6fb23002 /drivers/infiniband/hw
parent     accbef5cc624be745c1de903dd3a05681aaa0ac1 (diff)
parent     050da902adde8faf6b1bef15ac4876ae145358f4 (diff)
Merge branch 'mellanox' into k.o/for-next
Signed-off-by: Doug Ledford <dledford@redhat.com>
Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/mlx4/alias_GUID.c      |   2
-rw-r--r--  drivers/infiniband/hw/mlx4/cq.c              |   2
-rw-r--r--  drivers/infiniband/hw/mlx4/mcg.c             |   9
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c              |  22
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c              |   2
-rw-r--r--  drivers/infiniband/hw/mlx5/mad.c             |   2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c            |  86
-rw-r--r--  drivers/infiniband/hw/mlx5/mlx5_ib.h         |   2
-rw-r--r--  drivers/infiniband/hw/mlx5/mr.c              | 121
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c              |   5
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_cmd.c      |   2
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c        |   5
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_fwd.c      |  12
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_fwd.h      |   2
-rw-r--r--  drivers/infiniband/hw/usnic/usnic_ib_main.c  |  10
15 files changed, 176 insertions, 108 deletions
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 0e4f60c..155b4df 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -781,7 +781,7 @@ void mlx4_ib_init_alias_guid_work(struct mlx4_ib_dev *dev, int port)
spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
spin_lock_irqsave(&dev->sriov.alias_guid.ag_work_lock, flags1);
if (!dev->sriov.is_going_down) {
- /* If there is pending one should cancell then run, otherwise
+ /* If there is pending one should cancel then run, otherwise
* won't run till previous one is ended as same work
* struct is used.
*/
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 95382fa..cab7963 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -637,7 +637,7 @@ static void mlx4_ib_poll_sw_comp(struct mlx4_ib_cq *cq, int num_entries,
struct mlx4_ib_qp *qp;
*npolled = 0;
- /* Find uncompleted WQEs belonging to that cq and retrun
+ /* Find uncompleted WQEs belonging to that cq and return
* simulated FLUSH_ERR completions
*/
list_for_each_entry(qp, &cq->send_qp_list, cq_send_list) {
diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c
index b73f897..70eb9f9 100644
--- a/drivers/infiniband/hw/mlx4/mcg.c
+++ b/drivers/infiniband/hw/mlx4/mcg.c
@@ -808,8 +808,7 @@ static ssize_t sysfs_show_group(struct device *dev,
struct device_attribute *attr, char *buf);
static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
- union ib_gid *mgid, int create,
- gfp_t gfp_mask)
+ union ib_gid *mgid, int create)
{
struct mcast_group *group, *cur_group;
int is_mgid0;
@@ -825,7 +824,7 @@ static struct mcast_group *acquire_group(struct mlx4_ib_demux_ctx *ctx,
if (!create)
return ERR_PTR(-ENOENT);
- group = kzalloc(sizeof *group, gfp_mask);
+ group = kzalloc(sizeof(*group), GFP_KERNEL);
if (!group)
return ERR_PTR(-ENOMEM);
@@ -892,7 +891,7 @@ int mlx4_ib_mcg_demux_handler(struct ib_device *ibdev, int port, int slave,
case IB_MGMT_METHOD_GET_RESP:
case IB_SA_METHOD_DELETE_RESP:
mutex_lock(&ctx->mcg_table_lock);
- group = acquire_group(ctx, &rec->mgid, 0, GFP_KERNEL);
+ group = acquire_group(ctx, &rec->mgid, 0);
mutex_unlock(&ctx->mcg_table_lock);
if (IS_ERR(group)) {
if (mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP) {
@@ -954,7 +953,7 @@ int mlx4_ib_mcg_multiplex_handler(struct ib_device *ibdev, int port,
req->sa_mad = *sa_mad;
mutex_lock(&ctx->mcg_table_lock);
- group = acquire_group(ctx, &rec->mgid, may_create, GFP_KERNEL);
+ group = acquire_group(ctx, &rec->mgid, may_create);
mutex_unlock(&ctx->mcg_table_lock);
if (IS_ERR(group)) {
kfree(req);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index b40d50c..2747abd 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -748,7 +748,7 @@ static int create_qp_rss(struct mlx4_ib_dev *dev, struct ib_pd *ibpd,
INIT_LIST_HEAD(&qp->gid_list);
INIT_LIST_HEAD(&qp->steering_rules);
- qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_ETHERTYPE;
+ qp->mlx4_ib_qp_type = MLX4_IB_QPT_RAW_PACKET;
qp->state = IB_QPS_RESET;
/* Set dummy send resources to be compatible with HV and PRM */
@@ -812,6 +812,9 @@ static struct ib_qp *_mlx4_ib_create_qp_rss(struct ib_pd *pd,
return ERR_PTR(-EFAULT);
}
+ if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)))
+ return ERR_PTR(-EOPNOTSUPP);
+
if (ucmd.comp_mask || ucmd.reserved1)
return ERR_PTR(-EOPNOTSUPP);
@@ -1046,9 +1049,8 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd,
}
if (src == MLX4_IB_RWQ_SRC) {
- if (ucmd.wq.comp_mask || ucmd.wq.reserved1 ||
- ucmd.wq.reserved[0] || ucmd.wq.reserved[1] ||
- ucmd.wq.reserved[2]) {
+ if (ucmd.wq.comp_mask || ucmd.wq.reserved[0] ||
+ ucmd.wq.reserved[1] || ucmd.wq.reserved[2]) {
pr_debug("user command isn't supported\n");
err = -EOPNOTSUPP;
goto err;
@@ -2027,8 +2029,8 @@ static u8 gid_type_to_qpc(enum ib_gid_type gid_type)
*/
static int bringup_rss_rwqs(struct ib_rwq_ind_table *ind_tbl, u8 port_num)
{
+ int err = 0;
int i;
- int err;
for (i = 0; i < (1 << ind_tbl->log_ind_tbl_size); i++) {
struct ib_wq *ibwq = ind_tbl->ind_tbl[i];
@@ -2723,19 +2725,17 @@ enum {
static int _mlx4_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
int attr_mask, struct ib_udata *udata)
{
+ enum rdma_link_layer ll = IB_LINK_LAYER_UNSPECIFIED;
struct mlx4_ib_dev *dev = to_mdev(ibqp->device);
struct mlx4_ib_qp *qp = to_mqp(ibqp);
enum ib_qp_state cur_state, new_state;
int err = -EINVAL;
- int ll;
mutex_lock(&qp->mutex);
cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
- if (cur_state == new_state && cur_state == IB_QPS_RESET) {
- ll = IB_LINK_LAYER_UNSPECIFIED;
- } else {
+ if (cur_state != new_state || cur_state != IB_QPS_RESET) {
int port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
ll = rdma_port_get_link_layer(&dev->ib_dev, port);
}
@@ -4146,8 +4146,8 @@ struct ib_wq *mlx4_ib_create_wq(struct ib_pd *pd,
if (!(udata && pd->uobject))
return ERR_PTR(-EINVAL);
- required_cmd_sz = offsetof(typeof(ucmd), reserved) +
- sizeof(ucmd.reserved);
+ required_cmd_sz = offsetof(typeof(ucmd), comp_mask) +
+ sizeof(ucmd.comp_mask);
if (udata->inlen < required_cmd_sz) {
pr_debug("invalid inlen\n");
return ERR_PTR(-EINVAL);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index c155df4..2aa53f4 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -499,7 +499,7 @@ static void mlx5_ib_poll_sw_comp(struct mlx5_ib_cq *cq, int num_entries,
struct mlx5_ib_qp *qp;
*npolled = 0;
- /* Find uncompleted WQEs belonging to that cq and retrun mmics ones */
+ /* Find uncompleted WQEs belonging to that cq and return mmics ones */
list_for_each_entry(qp, &cq->list_send_qp, cq_send_list) {
sw_send_comp(qp, num_entries, wc + *npolled, npolled);
if (*npolled >= num_entries)
diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c
index 18cfe5b..1003b01 100644
--- a/drivers/infiniband/hw/mlx5/mad.c
+++ b/drivers/infiniband/hw/mlx5/mad.c
@@ -204,7 +204,7 @@ static int process_pma_cmd(struct ib_device *ibdev, u8 port_num,
int err;
void *out_cnt;
- /* Decalring support of extended counters */
+ /* Declaring support of extended counters */
if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO) {
struct ib_class_port_info cpi = {};
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 55045e2..62e6298 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -802,8 +802,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (field_avail(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes,
uhw->outlen)) {
- resp.mlx5_ib_support_multi_pkt_send_wqes =
- MLX5_CAP_ETH(mdev, multi_pkt_send_wqe);
+ if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
+ resp.mlx5_ib_support_multi_pkt_send_wqes =
+ MLX5_IB_ALLOW_MPW;
+
+ if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
+ resp.mlx5_ib_support_multi_pkt_send_wqes |=
+ MLX5_IB_SUPPORT_EMPW;
+
resp.response_length +=
sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
}
@@ -811,6 +817,27 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
if (field_avail(typeof(resp), reserved, uhw->outlen))
resp.response_length += sizeof(resp.reserved);
+ if (field_avail(typeof(resp), sw_parsing_caps,
+ uhw->outlen)) {
+ resp.response_length += sizeof(resp.sw_parsing_caps);
+ if (MLX5_CAP_ETH(mdev, swp)) {
+ resp.sw_parsing_caps.sw_parsing_offloads |=
+ MLX5_IB_SW_PARSING;
+
+ if (MLX5_CAP_ETH(mdev, swp_csum))
+ resp.sw_parsing_caps.sw_parsing_offloads |=
+ MLX5_IB_SW_PARSING_CSUM;
+
+ if (MLX5_CAP_ETH(mdev, swp_lso))
+ resp.sw_parsing_caps.sw_parsing_offloads |=
+ MLX5_IB_SW_PARSING_LSO;
+
+ if (resp.sw_parsing_caps.sw_parsing_offloads)
+ resp.sw_parsing_caps.supported_qpts =
+ BIT(IB_QPT_RAW_PACKET);
+ }
+ }
+
if (uhw->outlen) {
err = ib_copy_to_udata(uhw, &resp, resp.response_length);
@@ -2104,7 +2131,7 @@ static int parse_flow_attr(struct mlx5_core_dev *mdev, u32 *match_c,
* it won't fall into the multicast flow steering table and this rule
* could steal other multicast packets.
*/
-static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
+static bool flow_is_multicast_only(const struct ib_flow_attr *ib_attr)
{
union ib_flow_spec *flow_spec;
@@ -2316,10 +2343,31 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
return err ? ERR_PTR(err) : prio;
}
-static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
- struct mlx5_ib_flow_prio *ft_prio,
- const struct ib_flow_attr *flow_attr,
- struct mlx5_flow_destination *dst)
+static void set_underlay_qp(struct mlx5_ib_dev *dev,
+ struct mlx5_flow_spec *spec,
+ u32 underlay_qpn)
+{
+ void *misc_params_c = MLX5_ADDR_OF(fte_match_param,
+ spec->match_criteria,
+ misc_parameters);
+ void *misc_params_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
+ misc_parameters);
+
+ if (underlay_qpn &&
+ MLX5_CAP_FLOWTABLE_NIC_RX(dev->mdev,
+ ft_field_support.bth_dst_qp)) {
+ MLX5_SET(fte_match_set_misc,
+ misc_params_v, bth_dst_qp, underlay_qpn);
+ MLX5_SET(fte_match_set_misc,
+ misc_params_c, bth_dst_qp, 0xffffff);
+ }
+}
+
+static struct mlx5_ib_flow_handler *_create_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ const struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst,
+ u32 underlay_qpn)
{
struct mlx5_flow_table *ft = ft_prio->flow_table;
struct mlx5_ib_flow_handler *handler;
@@ -2355,6 +2403,9 @@ static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
ib_flow += ((union ib_flow_spec *)ib_flow)->size;
}
+ if (!flow_is_multicast_only(flow_attr))
+ set_underlay_qp(dev, spec, underlay_qpn);
+
spec->match_criteria_enable = get_match_criteria_enable(spec->match_criteria);
if (is_drop) {
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
@@ -2394,6 +2445,14 @@ free:
return err ? ERR_PTR(err) : handler;
}
+static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_flow_prio *ft_prio,
+ const struct ib_flow_attr *flow_attr,
+ struct mlx5_flow_destination *dst)
+{
+ return _create_flow_rule(dev, ft_prio, flow_attr, dst, 0);
+}
+
static struct mlx5_ib_flow_handler *create_dont_trap_rule(struct mlx5_ib_dev *dev,
struct mlx5_ib_flow_prio *ft_prio,
struct ib_flow_attr *flow_attr,
@@ -2530,6 +2589,7 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
struct mlx5_ib_flow_prio *ft_prio_tx = NULL;
struct mlx5_ib_flow_prio *ft_prio;
int err;
+ int underlay_qpn;
if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
return ERR_PTR(-ENOMEM);
@@ -2570,8 +2630,10 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
handler = create_dont_trap_rule(dev, ft_prio,
flow_attr, dst);
} else {
- handler = create_flow_rule(dev, ft_prio, flow_attr,
- dst);
+ underlay_qpn = (mqp->flags & MLX5_IB_QP_UNDERLAY) ?
+ mqp->underlay_qpn : 0;
+ handler = _create_flow_rule(dev, ft_prio, flow_attr,
+ dst, underlay_qpn);
}
} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
@@ -3793,6 +3855,8 @@ static int delay_drop_debugfs_init(struct mlx5_ib_dev *dev)
if (!dbg->timeout_debugfs)
goto out_debugfs;
+ dev->delay_drop.dbg = dbg;
+
return 0;
out_debugfs:
@@ -3817,8 +3881,8 @@ static void init_delay_drop(struct mlx5_ib_dev *dev)
mlx5_ib_warn(dev, "Failed to init delay drop debugfs\n");
}
-const struct cpumask *mlx5_ib_get_vector_affinity(struct ib_device *ibdev,
- int comp_vector)
+static const struct cpumask *
+mlx5_ib_get_vector_affinity(struct ib_device *ibdev, int comp_vector)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
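
The mlx5_ib_query_device() hunks above only fill in sw_parsing_caps (and grow resp.response_length) when field_avail() says the caller's output buffer is large enough to hold the field. A minimal stand-alone C sketch of that pattern follows; the struct, macro and field names here are made up for illustration and are not the driver's definitions.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical response layout: response_length reports how many bytes
 * of the struct were actually filled in for the caller. */
struct query_resp {
	unsigned int response_length;
	unsigned int flags;
	unsigned int sw_parsing_caps;
};

/* True when the caller's buffer (outlen bytes) can hold field fld. */
#define FIELD_AVAIL(type, fld, outlen) \
	((outlen) >= offsetof(type, fld) + sizeof(((type *)0)->fld))

int main(void)
{
	struct query_resp resp = { 0 };
	size_t user_outlen = sizeof(resp);	/* pretend size passed from userspace */

	if (FIELD_AVAIL(struct query_resp, sw_parsing_caps, user_outlen)) {
		resp.sw_parsing_caps = 0x1;	/* report some capability bit */
		resp.response_length += sizeof(resp.sw_parsing_caps);
	}

	printf("response_length = %u\n", resp.response_length);
	return 0;
}

Old userspace that passes a shorter buffer simply never sees the new field, which is what keeps the uverbs ABI extendable.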
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 7ac9910..b3380e8 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -503,7 +503,7 @@ struct mlx5_ib_mr {
struct mlx5_shared_mr_info *smr_info;
struct list_head list;
int order;
- int umred;
+ bool allocated_from_cache;
int npages;
struct mlx5_ib_dev *dev;
u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index a0eb2f9..0e2789d 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -48,8 +48,7 @@ enum {
#define MLX5_UMR_ALIGN 2048
static int clean_mr(struct mlx5_ib_mr *mr);
-static int max_umr_order(struct mlx5_ib_dev *dev);
-static int use_umr(struct mlx5_ib_dev *dev, int order);
+static int mr_cache_max_order(struct mlx5_ib_dev *dev);
static int unreg_umr(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr);
static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr)
@@ -184,7 +183,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
break;
}
mr->order = ent->order;
- mr->umred = 1;
+ mr->allocated_from_cache = 1;
mr->dev = dev;
MLX5_SET(mkc, mkc, free, 1);
@@ -497,7 +496,7 @@ static struct mlx5_ib_mr *alloc_cached_mr(struct mlx5_ib_dev *dev, int order)
int i;
c = order2idx(dev, order);
- last_umr_cache_entry = order2idx(dev, max_umr_order(dev));
+ last_umr_cache_entry = order2idx(dev, mr_cache_max_order(dev));
if (c < 0 || c > last_umr_cache_entry) {
mlx5_ib_warn(dev, "order %d, cache index %d\n", order, c);
return NULL;
@@ -677,12 +676,12 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
INIT_DELAYED_WORK(&ent->dwork, delayed_cache_work_func);
queue_work(cache->wq, &ent->work);
- if (i > MAX_UMR_CACHE_ENTRY) {
+ if (i > MR_CACHE_LAST_STD_ENTRY) {
mlx5_odp_init_mr_cache_entry(ent);
continue;
}
- if (!use_umr(dev, ent->order))
+ if (ent->order > mr_cache_max_order(dev))
continue;
ent->page = PAGE_SHIFT;
@@ -809,28 +808,24 @@ err_free:
return ERR_PTR(err);
}
-static int get_octo_len(u64 addr, u64 len, int page_size)
+static int get_octo_len(u64 addr, u64 len, int page_shift)
{
+ u64 page_size = 1ULL << page_shift;
u64 offset;
int npages;
offset = addr & (page_size - 1);
- npages = ALIGN(len + offset, page_size) >> ilog2(page_size);
+ npages = ALIGN(len + offset, page_size) >> page_shift;
return (npages + 1) / 2;
}
-static int max_umr_order(struct mlx5_ib_dev *dev)
+static int mr_cache_max_order(struct mlx5_ib_dev *dev)
{
if (MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset))
- return MAX_UMR_CACHE_ENTRY + 2;
+ return MR_CACHE_LAST_STD_ENTRY + 2;
return MLX5_MAX_UMR_SHIFT;
}
-static int use_umr(struct mlx5_ib_dev *dev, int order)
-{
- return order <= max_umr_order(dev);
-}
-
static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length,
int access_flags, struct ib_umem **umem,
int *npages, int *page_shift, int *ncont,
@@ -904,7 +899,8 @@ static int mlx5_ib_post_send_wait(struct mlx5_ib_dev *dev,
return err;
}
-static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
+static struct mlx5_ib_mr *alloc_mr_from_cache(
+ struct ib_pd *pd, struct ib_umem *umem,
u64 virt_addr, u64 len, int npages,
int page_shift, int order, int access_flags)
{
@@ -936,16 +932,6 @@ static struct mlx5_ib_mr *reg_umr(struct ib_pd *pd, struct ib_umem *umem,
mr->mmkey.size = len;
mr->mmkey.pd = to_mpd(pd)->pdn;
- err = mlx5_ib_update_xlt(mr, 0, npages, page_shift,
- MLX5_IB_UPD_XLT_ENABLE);
-
- if (err) {
- mlx5_mr_cache_free(dev, mr);
- return ERR_PTR(err);
- }
-
- mr->live = 1;
-
return mr;
}
@@ -1111,7 +1097,8 @@ free_xlt:
static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
u64 virt_addr, u64 length,
struct ib_umem *umem, int npages,
- int page_shift, int access_flags)
+ int page_shift, int access_flags,
+ bool populate)
{
struct mlx5_ib_dev *dev = to_mdev(pd->device);
struct mlx5_ib_mr *mr;
@@ -1126,15 +1113,19 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
if (!mr)
return ERR_PTR(-ENOMEM);
- inlen = MLX5_ST_SZ_BYTES(create_mkey_in) +
- sizeof(*pas) * ((npages + 1) / 2) * 2;
+ mr->ibmr.pd = pd;
+ mr->access_flags = access_flags;
+
+ inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
+ if (populate)
+ inlen += sizeof(*pas) * roundup(npages, 2);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_1;
}
pas = (__be64 *)MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
- if (!(access_flags & IB_ACCESS_ON_DEMAND))
+ if (populate && !(access_flags & IB_ACCESS_ON_DEMAND))
mlx5_ib_populate_pas(dev, umem, page_shift, pas,
pg_cap ? MLX5_IB_MTT_PRESENT : 0);
@@ -1143,23 +1134,27 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
MLX5_SET(create_mkey_in, in, pg_access, !!(pg_cap));
mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
+ MLX5_SET(mkc, mkc, free, !populate);
MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);
MLX5_SET(mkc, mkc, a, !!(access_flags & IB_ACCESS_REMOTE_ATOMIC));
MLX5_SET(mkc, mkc, rw, !!(access_flags & IB_ACCESS_REMOTE_WRITE));
MLX5_SET(mkc, mkc, rr, !!(access_flags & IB_ACCESS_REMOTE_READ));
MLX5_SET(mkc, mkc, lw, !!(access_flags & IB_ACCESS_LOCAL_WRITE));
MLX5_SET(mkc, mkc, lr, 1);
+ MLX5_SET(mkc, mkc, umr_en, 1);
MLX5_SET64(mkc, mkc, start_addr, virt_addr);
MLX5_SET64(mkc, mkc, len, length);
MLX5_SET(mkc, mkc, pd, to_mpd(pd)->pdn);
MLX5_SET(mkc, mkc, bsf_octword_size, 0);
MLX5_SET(mkc, mkc, translations_octword_size,
- get_octo_len(virt_addr, length, 1 << page_shift));
+ get_octo_len(virt_addr, length, page_shift));
MLX5_SET(mkc, mkc, log_page_size, page_shift);
MLX5_SET(mkc, mkc, qpn, 0xffffff);
- MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
- get_octo_len(virt_addr, length, 1 << page_shift));
+ if (populate) {
+ MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
+ get_octo_len(virt_addr, length, page_shift));
+ }
err = mlx5_core_create_mkey(dev->mdev, &mr->mmkey, in, inlen);
if (err) {
@@ -1168,9 +1163,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_mr *ibmr, struct ib_pd *pd,
}
mr->mmkey.type = MLX5_MKEY_MR;
mr->desc_size = sizeof(struct mlx5_mtt);
- mr->umem = umem;
mr->dev = dev;
- mr->live = 1;
kvfree(in);
mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key);
@@ -1210,6 +1203,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
int ncont;
int order;
int err;
+ bool use_umr = true;
mlx5_ib_dbg(dev, "start 0x%llx, virt_addr 0x%llx, length 0x%llx, access_flags 0x%x\n",
start, virt_addr, length, access_flags);
@@ -1228,27 +1222,29 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
err = mr_umem_get(pd, start, length, access_flags, &umem, &npages,
&page_shift, &ncont, &order);
- if (err < 0)
+ if (err < 0)
return ERR_PTR(err);
- if (use_umr(dev, order)) {
- mr = reg_umr(pd, umem, virt_addr, length, ncont, page_shift,
- order, access_flags);
+ if (order <= mr_cache_max_order(dev)) {
+ mr = alloc_mr_from_cache(pd, umem, virt_addr, length, ncont,
+ page_shift, order, access_flags);
if (PTR_ERR(mr) == -EAGAIN) {
mlx5_ib_dbg(dev, "cache empty for order %d", order);
mr = NULL;
}
- } else if (access_flags & IB_ACCESS_ON_DEMAND &&
- !MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
- err = -EINVAL;
- pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
- goto error;
+ } else if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset)) {
+ if (access_flags & IB_ACCESS_ON_DEMAND) {
+ err = -EINVAL;
+ pr_err("Got MR registration for ODP MR > 512MB, not supported for Connect-IB");
+ goto error;
+ }
+ use_umr = false;
}
if (!mr) {
mutex_lock(&dev->slow_path_mutex);
mr = reg_create(NULL, pd, virt_addr, length, umem, ncont,
- page_shift, access_flags);
+ page_shift, access_flags, !use_umr);
mutex_unlock(&dev->slow_path_mutex);
}
@@ -1266,8 +1262,22 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
update_odp_mr(mr);
#endif
- return &mr->ibmr;
+ if (use_umr) {
+ int update_xlt_flags = MLX5_IB_UPD_XLT_ENABLE;
+
+ if (access_flags & IB_ACCESS_ON_DEMAND)
+ update_xlt_flags |= MLX5_IB_UPD_XLT_ZAP;
+ err = mlx5_ib_update_xlt(mr, 0, ncont, page_shift,
+ update_xlt_flags);
+ if (err) {
+ mlx5_ib_dereg_mr(&mr->ibmr);
+ return ERR_PTR(err);
+ }
+ }
+
+ mr->live = 1;
+ return &mr->ibmr;
error:
ib_umem_release(umem);
return ERR_PTR(err);
@@ -1355,7 +1365,7 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
/*
* UMR can't be used - MKey needs to be replaced.
*/
- if (mr->umred) {
+ if (mr->allocated_from_cache) {
err = unreg_umr(dev, mr);
if (err)
mlx5_ib_warn(dev, "Failed to unregister MR\n");
@@ -1368,12 +1378,13 @@ int mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
return err;
mr = reg_create(ib_mr, pd, addr, len, mr->umem, ncont,
- page_shift, access_flags);
+ page_shift, access_flags, true);
if (IS_ERR(mr))
return PTR_ERR(mr);
- mr->umred = 0;
+ mr->allocated_from_cache = 0;
+ mr->live = 1;
} else {
/*
* Send a UMR WQE
@@ -1461,7 +1472,7 @@ mlx5_free_priv_descs(struct mlx5_ib_mr *mr)
static int clean_mr(struct mlx5_ib_mr *mr)
{
struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device);
- int umred = mr->umred;
+ int allocated_from_cache = mr->allocated_from_cache;
int err;
if (mr->sig) {
@@ -1479,20 +1490,20 @@ static int clean_mr(struct mlx5_ib_mr *mr)
mlx5_free_priv_descs(mr);
- if (!umred) {
+ if (!allocated_from_cache) {
+ u32 key = mr->mmkey.key;
+
err = destroy_mkey(dev, mr);
+ kfree(mr);
if (err) {
mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
- mr->mmkey.key, err);
+ key, err);
return err;
}
} else {
mlx5_mr_cache_free(dev, mr);
}
- if (!umred)
- kfree(mr);
-
return 0;
}
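
The final clean_mr() hunk copies mr->mmkey.key into a local before kfree(mr) and then logs from the copy, so the failure message never dereferences freed memory and the MR is freed on both the success and error paths. A small stand-alone C sketch of that save-then-free ordering, with made-up types and helpers:

#include <stdio.h>
#include <stdlib.h>

struct fake_mr {
	unsigned int key;	/* stands in for mr->mmkey.key */
};

static int destroy_key(unsigned int key)
{
	(void)key;
	return -1;		/* pretend the destroy command failed */
}

static int clean_fake_mr(struct fake_mr *mr)
{
	unsigned int key = mr->key;	/* copy before the object goes away */
	int err;

	err = destroy_key(key);
	free(mr);			/* free unconditionally, as in the patch */
	if (err) {
		fprintf(stderr, "failed to destroy mkey 0x%x (%d)\n", key, err);
		return err;
	}
	return 0;
}

int main(void)
{
	struct fake_mr *mr = malloc(sizeof(*mr));

	if (!mr)
		return 1;
	mr->key = 0x1234;
	return clean_fake_mr(mr) ? 1 : 0;
}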
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index bc49d14..d6df88a 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1083,11 +1083,16 @@ static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+ if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
+ MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
MLX5_SET(sqc, sqc, tis_lst_sz, 1);
MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
+ if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
+ MLX5_CAP_ETH(dev->mdev, swp))
+ MLX5_SET(sqc, sqc, allow_swp, 1);
wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index a1900d2..419a2a2 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -698,7 +698,7 @@ static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm,
for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) {
if (virt != -1) {
pages[nent * 2] = cpu_to_be64(virt);
- virt += 1 << lg;
+ virt += 1ULL << lg;
}
pages[nent * 2 + 1] =
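
The mthca_map_cmd() one-liner widens the shift to 1ULL so the per-chunk increment is computed in 64 bits: with a plain int, 1 << lg is undefined once lg reaches 31 and can never carry the running virtual address past 4 GiB. A tiny illustrative program (the values are arbitrary):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t virt = 0;
	unsigned int lg = 30;	/* hypothetical 1 GiB mapping chunks */
	int i;

	/* 1ULL keeps the shift and the addition in 64 bits, so virt can
	 * grow past 4 GiB; "1 << lg" would be a 32-bit int and overflows
	 * once lg reaches 31. */
	for (i = 0; i < 8; i++)
		virt += 1ULL << lg;

	printf("virt = 0x%llx\n", (unsigned long long)virt);	/* 0x200000000 */
	return 0;
}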
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index c2943e3..f0dc5f4 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -481,21 +481,16 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
props->lid = 1;
- props->lmc = 0;
- props->sm_lid = 0;
- props->sm_sl = 0;
if (netif_queue_stopped(netdev))
props->state = IB_PORT_DOWN;
else if (nesvnic->linkup)
props->state = IB_PORT_ACTIVE;
else
props->state = IB_PORT_DOWN;
- props->phys_state = 0;
props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
props->gid_tbl_len = 1;
props->pkey_tbl_len = 1;
- props->qkey_viol_cntr = 0;
props->active_width = IB_WIDTH_4X;
props->active_speed = IB_SPEED_SDR;
props->max_msg_sz = 0x80000000;
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c
index 3c37dd5..995a26b 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.c
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.c
@@ -110,20 +110,12 @@ void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN])
spin_unlock(&ufdev->lock);
}
-int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
+void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr)
{
- int status;
-
spin_lock(&ufdev->lock);
- if (ufdev->inaddr == 0) {
+ if (!ufdev->inaddr)
ufdev->inaddr = inaddr;
- status = 0;
- } else {
- status = -EFAULT;
- }
spin_unlock(&ufdev->lock);
-
- return status;
}
void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev)
diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h
index b2ac22b..0b2cc4e 100644
--- a/drivers/infiniband/hw/usnic/usnic_fwd.h
+++ b/drivers/infiniband/hw/usnic/usnic_fwd.h
@@ -75,7 +75,7 @@ struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev);
void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev);
void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]);
-int usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
+void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr);
void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev);
void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev);
void usnic_fwd_carrier_down(struct usnic_fwd_dev *ufdev);
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c
index e86700f9..f45e99a 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_main.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c
@@ -351,7 +351,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
{
struct usnic_ib_dev *us_ibdev;
union ib_gid gid;
- struct in_ifaddr *in;
+ struct in_device *ind;
struct net_device *netdev;
usnic_dbg("\n");
@@ -441,9 +441,11 @@ static void *usnic_ib_device_add(struct pci_dev *dev)
if (netif_carrier_ok(us_ibdev->netdev))
usnic_fwd_carrier_up(us_ibdev->ufdev);
- in = ((struct in_device *)(netdev->ip_ptr))->ifa_list;
- if (in != NULL)
- usnic_fwd_add_ipaddr(us_ibdev->ufdev, in->ifa_address);
+ ind = in_dev_get(netdev);
+ if (ind->ifa_list)
+ usnic_fwd_add_ipaddr(us_ibdev->ufdev,
+ ind->ifa_list->ifa_address);
+ in_dev_put(ind);
usnic_mac_ip_to_gid(us_ibdev->netdev->perm_addr,
us_ibdev->ufdev->inaddr, &gid.raw[0]);
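
The usnic_ib_device_add() hunk stops dereferencing netdev->ip_ptr directly and instead brackets the ifa_list lookup with in_dev_get()/in_dev_put(), taking and releasing a reference on the in_device. A kernel-style sketch of that pattern, with a hypothetical helper name and an added NULL check a caller may want:

#include <linux/netdevice.h>
#include <linux/inetdevice.h>

/* Illustrative helper, not part of the patch: return the first IPv4
 * address configured on netdev, or 0 if there is none. */
static __be32 first_ipv4_addr(struct net_device *netdev)
{
	struct in_device *ind;
	__be32 addr = 0;

	ind = in_dev_get(netdev);	/* takes a refcount, may return NULL */
	if (!ind)
		return 0;
	if (ind->ifa_list)
		addr = ind->ifa_list->ifa_address;
	in_dev_put(ind);		/* drop the reference taken above */

	return addr;
}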