Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/en_main.c')
-rw-r--r-- | drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 318
1 file changed, 184 insertions, 134 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index d75f309..dfc2972 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -213,6 +213,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
 		s->rx_cache_full += rq_stats->cache_full;
 		s->rx_cache_empty += rq_stats->cache_empty;
 		s->rx_cache_busy += rq_stats->cache_busy;
+		s->rx_cache_waive += rq_stats->cache_waive;
 
 		for (j = 0; j < priv->channels.params.num_tc; j++) {
 			sq_stats = &c->sq[j].stats;
@@ -293,6 +294,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
 		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
 	}
 
+	if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
+		out = pstats->eth_ext_counters;
+		MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
+		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+	}
+
 	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
 	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
 		out = pstats->per_prio_counters[prio];
@@ -593,12 +600,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	}
 
 	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
-	rq->rx_headroom = params->rq_headroom;
+	rq->buff.headroom = params->rq_headroom;
 
 	switch (rq->wq_type) {
 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
 
-		rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
+		rq->post_wqes = mlx5e_post_rx_mpwqes;
 		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
 
 		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
@@ -615,11 +622,10 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 			goto err_rq_wq_destroy;
 		}
 
-		rq->mpwqe_stride_sz = BIT(params->mpwqe_log_stride_sz);
-		rq->mpwqe_num_strides = BIT(params->mpwqe_log_num_strides);
+		rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz;
+		rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);
 
-		rq->buff.wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
-		byte_count = rq->buff.wqe_sz;
+		byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;
 
 		err = mlx5e_create_rq_umr_mkey(mdev, rq);
 		if (err)
@@ -639,7 +645,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 			err = -ENOMEM;
 			goto err_rq_wq_destroy;
 		}
-		rq->alloc_wqe = mlx5e_alloc_rx_wqe;
+		rq->post_wqes = mlx5e_post_rx_wqes;
 		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
 
 #ifdef CONFIG_MLX5_EN_IPSEC
@@ -655,18 +661,17 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 			goto err_rq_wq_destroy;
 		}
 
-		rq->buff.wqe_sz = params->lro_en ?
+		byte_count = params->lro_en ?
 			params->lro_wqe_sz :
 			MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
 #ifdef CONFIG_MLX5_EN_IPSEC
 		if (MLX5_IPSEC_DEV(mdev))
-			rq->buff.wqe_sz += MLX5E_METADATA_ETHER_LEN;
+			byte_count += MLX5E_METADATA_ETHER_LEN;
 #endif
 		rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;
-		byte_count = rq->buff.wqe_sz;
 
 		/* calc the required page order */
-		rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->rx_headroom + byte_count);
+		rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
 		npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
 		rq->buff.page_order = order_base_2(npages);
 
@@ -677,6 +682,12 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
 	for (i = 0; i < wq_sz; i++) {
 		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
 
+		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
+			u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT;
+
+			wqe->data.addr = cpu_to_be64(dma_offset);
+		}
+
 		wqe->data.byte_count = cpu_to_be32(byte_count);
 		wqe->data.lkey = rq->mkey_be;
 	}
@@ -883,7 +894,8 @@ static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
 	u16 wqe_ix;
 
 	/* UMR WQE (if in progress) is always at wq->head */
-	if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+	    rq->mpwqe.umr_in_progress)
 		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);
 
 	while (!mlx5_wq_ll_is_empty(wq)) {
@@ -926,7 +938,7 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
 		goto err_destroy_rq;
 
 	if (params->rx_am_enabled)
-		set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
+		c->rq.state |= BIT(MLX5E_RQ_STATE_AM);
 
 	return 0;
 
@@ -946,7 +958,6 @@ static void mlx5e_activate_rq(struct mlx5e_rq *rq)
 
 	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
 	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
-	sq->db.ico_wqe[pi].num_wqebbs = 1;
 	nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
 	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
 }
@@ -1048,7 +1059,6 @@ static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
 	struct mlx5_core_dev *mdev = c->mdev;
 	int err;
 
-	sq->pdev = c->pdev;
 	sq->mkey_be = c->mkey_be;
 	sq->channel = c;
 	sq->uar_map = mdev->mlx5e_res.bfreg.map;
@@ -1752,7 +1762,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	struct mlx5e_cq_moder icocq_moder = {0, 0};
 	struct net_device *netdev = priv->netdev;
 	struct mlx5e_channel *c;
+	unsigned int irq;
 	int err;
+	int eqn;
 
 	c = kzalloc_node(sizeof(*c), GFP_KERNEL, mlx5e_get_node(priv, ix));
 	if (!c)
@@ -1768,6 +1780,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
 	c->num_tc = params->num_tc;
 	c->xdp = !!params->xdp_prog;
 
+	mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
+	c->irq_desc = irq_to_desc(irq);
+
 	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
 
 	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
@@ -2345,9 +2360,10 @@ static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
 
 void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
 				    enum mlx5e_traffic_types tt,
-				    void *tirc)
+				    void *tirc, bool inner)
 {
-	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+	void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
+			     MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
 
 #define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
 				 MLX5_HASH_FIELD_SEL_DST_IP)
@@ -2496,6 +2512,21 @@ free_in:
 	return err;
 }
 
+static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
+					    enum mlx5e_traffic_types tt,
+					    u32 *tirc)
+{
+	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
+
+	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);
+
+	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
+	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
+	MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);
+
+	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
+}
+
 static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
 {
 	struct mlx5_core_dev *mdev = priv->mdev;
@@ -2583,12 +2614,6 @@ static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
 	}
 }
 
-static bool mlx5e_is_eswitch_vport_mngr(struct mlx5_core_dev *mdev)
-{
-	return (MLX5_CAP_GEN(mdev, vport_group_manager) &&
-		MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH);
-}
-
 void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 {
 	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
@@ -2602,7 +2627,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
 	mlx5e_activate_channels(&priv->channels);
 	netif_tx_start_all_queues(priv->netdev);
 
-	if (mlx5e_is_eswitch_vport_mngr(priv->mdev))
+	if (MLX5_VPORT_MANAGER(priv->mdev))
 		mlx5e_add_sqs_fwd_rules(priv);
 
 	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2613,7 +2638,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
 {
 	mlx5e_redirect_rqts_to_drop(priv);
 
-	if (mlx5e_is_eswitch_vport_mngr(priv->mdev))
+	if (MLX5_VPORT_MANAGER(priv->mdev))
 		mlx5e_remove_sqs_fwd_rules(priv);
 
 	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -2690,6 +2715,8 @@ int mlx5e_open(struct net_device *netdev)
 
 	mutex_lock(&priv->state_lock);
 	err = mlx5e_open_locked(netdev);
+	if (!err)
+		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -2724,6 +2751,7 @@ int mlx5e_close(struct net_device *netdev)
 		return -ENODEV;
 
 	mutex_lock(&priv->state_lock);
+	mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
 	err = mlx5e_close_locked(netdev);
 	mutex_unlock(&priv->state_lock);
 
 	return err;
@@ -2864,7 +2892,7 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
 
 	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
 	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
-	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc);
+	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
 }
 
 static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
@@ -2883,6 +2911,7 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
 	struct mlx5e_tir *tir;
 	void *tirc;
 	int inlen;
+	int i = 0;
 	int err;
 	u32 *in;
 	int tt;
@@ -2898,16 +2927,36 @@ int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
 		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
 		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
 		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
-		if (err)
-			goto err_destroy_tirs;
+		if (err) {
+			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
+			goto err_destroy_inner_tirs;
+		}
 	}
 
+	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+		goto out;
+
+	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
+		memset(in, 0, inlen);
+		tir = &priv->inner_indir_tir[i];
+		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+		mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
+		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
+		if (err) {
+			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
+			goto err_destroy_inner_tirs;
+		}
+	}
+
+out:
 	kvfree(in);
 
 	return 0;
 
-err_destroy_tirs:
-	mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
+err_destroy_inner_tirs:
+	for (i--; i >= 0; i--)
+		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
+
 	for (tt--; tt >= 0; tt--)
 		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
 
@@ -2961,6 +3010,12 @@ void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
 
 	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
 		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
+
+	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
+		return;
+
+	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
+		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
 }
 
 void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
@@ -3000,12 +3055,16 @@ static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
 	return 0;
 }
 
-static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
+static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
+				 struct tc_mqprio_qopt *mqprio)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
 	struct mlx5e_channels new_channels = {};
+	u8 tc = mqprio->num_tc;
 	int err = 0;
 
+	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
+
 	if (tc && tc != MLX5E_MAX_NUM_TC)
 		return -EINVAL;
 
@@ -3029,39 +3088,42 @@ out:
 	return err;
 }
 
-static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
-			      u32 chain_index, __be16 proto,
-			      struct tc_to_netdev *tc)
+#ifdef CONFIG_MLX5_ESWITCH
+static int mlx5e_setup_tc_cls_flower(struct net_device *dev,
+				     struct tc_cls_flower_offload *cls_flower)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
 
-	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
-		goto mqprio;
+	if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
+	    cls_flower->common.chain_index)
+		return -EOPNOTSUPP;
 
-	if (chain_index)
+	switch (cls_flower->command) {
+	case TC_CLSFLOWER_REPLACE:
+		return mlx5e_configure_flower(priv, cls_flower);
+	case TC_CLSFLOWER_DESTROY:
+		return mlx5e_delete_flower(priv, cls_flower);
+	case TC_CLSFLOWER_STATS:
+		return mlx5e_stats_flower(priv, cls_flower);
+	default:
 		return -EOPNOTSUPP;
+	}
+}
+#endif
 
-	switch (tc->type) {
+static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
+			  void *type_data)
+{
+	switch (type) {
+#ifdef CONFIG_MLX5_ESWITCH
 	case TC_SETUP_CLSFLOWER:
-		switch (tc->cls_flower->command) {
-		case TC_CLSFLOWER_REPLACE:
-			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
-		case TC_CLSFLOWER_DESTROY:
-			return mlx5e_delete_flower(priv, tc->cls_flower);
-		case TC_CLSFLOWER_STATS:
-			return mlx5e_stats_flower(priv, tc->cls_flower);
-		}
+		return mlx5e_setup_tc_cls_flower(dev, type_data);
+#endif
+	case TC_SETUP_MQPRIO:
+		return mlx5e_setup_tc_mqprio(dev, type_data);
 	default:
 		return -EOPNOTSUPP;
 	}
-
-mqprio:
-	if (tc->type != TC_SETUP_MQPRIO)
-		return -EINVAL;
-
-	tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;
-
-	return mlx5e_setup_tc(dev, tc->mqprio->num_tc);
 }
 
 static void
@@ -3358,6 +3420,7 @@ static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 	}
 }
 
+#ifdef CONFIG_MLX5_ESWITCH
 static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
 {
 	struct mlx5e_priv *priv = netdev_priv(dev);
@@ -3460,6 +3523,7 @@ static int mlx5e_get_vf_stats(struct net_device *dev,
 	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
 					    vf_stats);
 }
+#endif
 
 static void mlx5e_add_vxlan_port(struct net_device *netdev,
 				 struct udp_tunnel_info *ti)
@@ -3489,13 +3553,13 @@ static void mlx5e_del_vxlan_port(struct net_device *netdev,
 	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
 }
 
-static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
-						    struct sk_buff *skb,
-						    netdev_features_t features)
+static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
+						     struct sk_buff *skb,
+						     netdev_features_t features)
 {
 	struct udphdr *udph;
-	u16 proto;
-	u16 port = 0;
+	u8 proto;
+	u16 port;
 
 	switch (vlan_get_protocol(skb)) {
 	case htons(ETH_P_IP):
@@ -3508,14 +3572,17 @@ static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
 		goto out;
 	}
 
-	if (proto == IPPROTO_UDP) {
+	switch (proto) {
+	case IPPROTO_GRE:
+		return features;
+	case IPPROTO_UDP:
 		udph = udp_hdr(skb);
 		port = be16_to_cpu(udph->dest);
-	}
 
-	/* Verify if UDP port is being offloaded by HW */
-	if (port && mlx5e_vxlan_lookup_port(priv, port))
-		return features;
+		/* Verify if UDP port is being offloaded by HW */
+		if (mlx5e_vxlan_lookup_port(priv, port))
+			return features;
+	}
 
 out:
 	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
@@ -3539,7 +3606,7 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
 	/* Validate if the tunneled packet is being offloaded by HW */
 	if (skb->encapsulation &&
 	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
-		return mlx5e_vxlan_features_check(priv, skb, features);
+		return mlx5e_tunnel_features_check(priv, skb, features);
 
 	return features;
 }
@@ -3636,7 +3703,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
 
 		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
 		/* napi_schedule in case we have missed anything */
-		set_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
 		napi_schedule(&c->napi);
 
 		if (old_prog)
@@ -3693,11 +3759,11 @@ static void mlx5e_netpoll(struct net_device *dev)
 }
 #endif
 
-static const struct net_device_ops mlx5e_netdev_ops_basic = {
+static const struct net_device_ops mlx5e_netdev_ops = {
 	.ndo_open = mlx5e_open,
 	.ndo_stop = mlx5e_close,
 	.ndo_start_xmit = mlx5e_xmit,
-	.ndo_setup_tc = mlx5e_ndo_setup_tc,
+	.ndo_setup_tc = mlx5e_setup_tc,
 	.ndo_select_queue = mlx5e_select_queue,
 	.ndo_get_stats64 = mlx5e_get_stats,
 	.ndo_set_rx_mode = mlx5e_set_rx_mode,
@@ -3708,6 +3774,9 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
 	.ndo_change_mtu = mlx5e_change_mtu,
 	.ndo_do_ioctl = mlx5e_ioctl,
 	.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
+	.ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
+	.ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
+	.ndo_features_check = mlx5e_features_check,
 #ifdef CONFIG_RFS_ACCEL
 	.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
 #endif
@@ -3716,29 +3785,8 @@ static const struct net_device_ops mlx5e_netdev_ops_basic = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = mlx5e_netpoll,
 #endif
-};
-
-static const struct net_device_ops mlx5e_netdev_ops_sriov = {
-	.ndo_open = mlx5e_open,
-	.ndo_stop = mlx5e_close,
-	.ndo_start_xmit = mlx5e_xmit,
-	.ndo_setup_tc = mlx5e_ndo_setup_tc,
-	.ndo_select_queue = mlx5e_select_queue,
-	.ndo_get_stats64 = mlx5e_get_stats,
-	.ndo_set_rx_mode = mlx5e_set_rx_mode,
-	.ndo_set_mac_address = mlx5e_set_mac,
-	.ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
-	.ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
-	.ndo_set_features = mlx5e_set_features,
-	.ndo_change_mtu = mlx5e_change_mtu,
-	.ndo_do_ioctl = mlx5e_ioctl,
-	.ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
-	.ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
-	.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
-	.ndo_features_check = mlx5e_features_check,
-#ifdef CONFIG_RFS_ACCEL
-	.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
-#endif
+#ifdef CONFIG_MLX5_ESWITCH
+	/* SRIOV E-Switch NDOs */
 	.ndo_set_vf_mac = mlx5e_set_vf_mac,
 	.ndo_set_vf_vlan = mlx5e_set_vf_vlan,
 	.ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
@@ -3747,13 +3795,9 @@ static const struct net_device_ops mlx5e_netdev_ops_sriov = {
 	.ndo_get_vf_config = mlx5e_get_vf_config,
 	.ndo_set_vf_link_state = mlx5e_set_vf_link_state,
 	.ndo_get_vf_stats = mlx5e_get_vf_stats,
-	.ndo_tx_timeout = mlx5e_tx_timeout,
-	.ndo_xdp = mlx5e_xdp,
-#ifdef CONFIG_NET_POLL_CONTROLLER
-	.ndo_poll_controller = mlx5e_netpoll,
-#endif
 	.ndo_has_offload_stats = mlx5e_has_offload_stats,
 	.ndo_get_offload_stats = mlx5e_get_offload_stats,
+#endif
 };
 
 static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -3790,8 +3834,7 @@ u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
 	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
 }
 
-void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
-				   u32 *indirection_rqt, int len,
+void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
 				   int num_channels)
 {
 	int i;
@@ -3934,7 +3977,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
 	/* RSS */
 	params->rss_hfunc = ETH_RSS_HASH_XOR;
 	netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
-	mlx5e_build_default_indir_rqt(mdev, params->indirection_rqt,
+	mlx5e_build_default_indir_rqt(params->indirection_rqt,
 				      MLX5E_INDIR_RQT_SIZE, max_channels);
 }
 
@@ -3973,9 +4016,11 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
 	}
 }
 
+#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
 static const struct switchdev_ops mlx5e_switchdev_ops = {
 	.switchdev_port_attr_get = mlx5e_attr_get,
 };
+#endif
 
 static void mlx5e_build_nic_netdev(struct net_device *netdev)
 {
@@ -3986,15 +4031,12 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 
 	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
 
-	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
-		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
+	netdev->netdev_ops = &mlx5e_netdev_ops;
+
 #ifdef CONFIG_MLX5_CORE_EN_DCB
-		if (MLX5_CAP_GEN(mdev, qos))
-			netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
+	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
+		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
 #endif
-	} else {
-		netdev->netdev_ops = &mlx5e_netdev_ops_basic;
-	}
 
 	netdev->watchdog_timeo = 15 * HZ;
 
@@ -4017,20 +4059,32 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
 	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
 
-	if (mlx5e_vxlan_allowed(mdev)) {
-		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
-				       NETIF_F_GSO_UDP_TUNNEL_CSUM |
-				       NETIF_F_GSO_PARTIAL;
+	if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
+		netdev->hw_features |= NETIF_F_GSO_PARTIAL;
 		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
 		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
 		netdev->hw_enc_features |= NETIF_F_TSO;
 		netdev->hw_enc_features |= NETIF_F_TSO6;
-		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
-		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
-					   NETIF_F_GSO_PARTIAL;
+		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
+	}
+
+	if (mlx5e_vxlan_allowed(mdev)) {
+		netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
+				       NETIF_F_GSO_UDP_TUNNEL_CSUM;
+		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
+					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
 		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
 	}
 
+	if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
+		netdev->hw_features |= NETIF_F_GSO_GRE |
+				       NETIF_F_GSO_GRE_CSUM;
+		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
+					   NETIF_F_GSO_GRE_CSUM;
+		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
+						NETIF_F_GSO_GRE_CSUM;
+	}
+
 	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);
 
 	if (fcs_supported)
@@ -4066,8 +4120,8 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
 
 	mlx5e_set_netdev_dev_addr(netdev);
 
-#ifdef CONFIG_NET_SWITCHDEV
-	if (MLX5_CAP_GEN(mdev, vport_group_manager))
+#if IS_ENABLED(CONFIG_NET_SWITCHDEV) && IS_ENABLED(CONFIG_MLX5_ESWITCH)
+	if (MLX5_VPORT_MANAGER(mdev))
 		netdev->switchdev_ops = &mlx5e_switchdev_ops;
 #endif
 
@@ -4199,6 +4253,10 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
 	mlx5e_init_l2_addr(priv);
 
+	/* Marking the link as currently not needed by the Driver */
+	if (!netif_running(netdev))
+		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);
+
 	/* MTU range: 68 - hw-specific max */
 	netdev->min_mtu = ETH_MIN_MTU;
 	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
@@ -4209,7 +4267,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
 
 	mlx5e_enable_async_events(priv);
 
-	if (MLX5_CAP_GEN(mdev, vport_group_manager))
+	if (MLX5_VPORT_MANAGER(priv->mdev))
 		mlx5e_register_vport_reps(priv);
 
 	if (netdev->reg_state != NETREG_REGISTERED)
@@ -4243,7 +4301,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
 
 	queue_work(priv->wq, &priv->set_rx_mode_work);
 
-	if (MLX5_CAP_GEN(mdev, vport_group_manager))
+	if (MLX5_VPORT_MANAGER(priv->mdev))
 		mlx5e_unregister_vport_reps(priv);
 
 	mlx5e_disable_async_events(priv);
@@ -4416,32 +4474,29 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
 
 static void *mlx5e_add(struct mlx5_core_dev *mdev)
 {
-	struct mlx5_eswitch *esw = mdev->priv.eswitch;
-	int total_vfs = MLX5_TOTAL_VPORTS(mdev);
-	struct mlx5e_rep_priv *rpriv = NULL;
+	struct net_device *netdev;
+	void *rpriv = NULL;
 	void *priv;
-	int vport;
 	int err;
-	struct net_device *netdev;
 
 	err = mlx5e_check_required_hca_cap(mdev);
 	if (err)
 		return NULL;
 
-	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
-		rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
+#ifdef CONFIG_MLX5_ESWITCH
+	if (MLX5_VPORT_MANAGER(mdev)) {
+		rpriv = mlx5e_alloc_nic_rep_priv(mdev);
 		if (!rpriv) {
-			mlx5_core_warn(mdev,
-				       "Not creating net device, Failed to alloc rep priv data\n");
+			mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
 			return NULL;
 		}
-		rpriv->rep = &esw->offloads.vport_reps[0];
 	}
+#endif
 
 	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
 	if (!netdev) {
 		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
-		goto err_unregister_reps;
+		goto err_free_rpriv;
 	}
 
 	priv = netdev_priv(netdev);
@@ -4462,14 +4517,9 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
 
 err_detach:
 	mlx5e_detach(mdev, priv);
-
 err_destroy_netdev:
 	mlx5e_destroy_netdev(priv);
-
-err_unregister_reps:
-	for (vport = 1; vport < total_vfs; vport++)
-		mlx5_eswitch_unregister_vport_rep(esw, vport);
-
+err_free_rpriv:
 	kfree(rpriv);
 	return NULL;
 }