Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core/eswitch.c')
-rw-r--r--	drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 251
1 file changed, 215 insertions(+), 36 deletions(-)
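For orientation before the hunks below: this patch migrates every flow-steering call in eswitch.c from mlx5_add_flow_rule()/mlx5_del_flow_rule() to mlx5_add_flow_rules()/mlx5_del_flow_rules(). The action (formerly a scalar argument alongside flow_tag) moves into a struct mlx5_flow_act, the destination is passed as an array plus an explicit count, and the returned object becomes a struct mlx5_flow_handle. A minimal sketch of that calling convention follows; the helper name and the table/vport wiring are illustrative only and are not part of this patch.

	/* Illustrative only -- not part of this patch. Mirrors the FDB
	 * forwarding rules created by __esw_fdb_set_vport_rule() below.
	 */
	#include <linux/mlx5/fs.h>

	static struct mlx5_flow_handle *
	example_fwd_to_vport(struct mlx5_flow_table *fdb,
			     struct mlx5_flow_spec *spec, u32 vport)
	{
		struct mlx5_flow_act flow_act = {0};
		struct mlx5_flow_destination dest = {0};

		dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
		dest.vport_num = vport;

		/* action/flow_tag used to be scalar arguments of
		 * mlx5_add_flow_rule(); the action now lives in flow_act.
		 */
		flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;

		/* one destination, hence the trailing count of 1 */
		return mlx5_add_flow_rules(fdb, spec, &flow_act, &dest, 1);
	}

The returned handle is released with mlx5_del_flow_rules(), which is why every mlx5_del_flow_rule() call site in the hunks below simply gains an "s".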
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index be1f733..d6807c3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -56,7 +56,7 @@ struct esw_uc_addr {
 /* E-Switch MC FDB table hash node */
 struct esw_mc_addr { /* SRIOV only */
 	struct l2addr_node     node;
-	struct mlx5_flow_rule *uplink_rule; /* Forward to uplink rule */
+	struct mlx5_flow_handle *uplink_rule; /* Forward to uplink rule */
 	u32                    refcnt;
 };
 
@@ -65,7 +65,7 @@ struct vport_addr {
 	struct l2addr_node     node;
 	u8                     action;
 	u32                    vport;
-	struct mlx5_flow_rule *flow_rule; /* SRIOV only */
+	struct mlx5_flow_handle *flow_rule; /* SRIOV only */
 	/* A flag indicating that mac was added due to mc promiscuous vport */
 	bool mc_promisc;
 };
@@ -237,13 +237,14 @@ static void del_l2_table_entry(struct mlx5_core_dev *dev, u32 index)
 }
 
 /* E-Switch FDB */
-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
 __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
 			 u8 mac_c[ETH_ALEN], u8 mac_v[ETH_ALEN])
 {
 	int match_header = (is_zero_ether_addr(mac_c) ? 0 :
 			    MLX5_MATCH_OUTER_HEADERS);
-	struct mlx5_flow_rule *flow_rule = NULL;
+	struct mlx5_flow_handle *flow_rule = NULL;
+	struct mlx5_flow_act flow_act = {0};
 	struct mlx5_flow_destination dest;
 	struct mlx5_flow_spec *spec;
 	void *mv_misc = NULL;
@@ -285,10 +286,10 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
 		  "\tFDB add rule dmac_v(%pM) dmac_c(%pM) -> vport(%d)\n",
 		  dmac_v, dmac_c, vport);
 	spec->match_criteria_enable = match_header;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
 	flow_rule =
-		mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
-				   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
-				   0, &dest);
+		mlx5_add_flow_rules(esw->fdb_table.fdb, spec,
+				    &flow_act, &dest, 1);
 	if (IS_ERR(flow_rule)) {
 		esw_warn(esw->dev,
 			 "FDB: Failed to add flow rule: dmac_v(%pM) dmac_c(%pM) -> vport(%d), err(%ld)\n",
@@ -300,7 +301,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
 	return flow_rule;
 }
 
-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
 esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
 {
 	u8 mac_c[ETH_ALEN];
@@ -309,7 +310,7 @@ esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u8 mac[ETH_ALEN], u32 vport)
 	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac);
 }
 
-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
 esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
 {
 	u8 mac_c[ETH_ALEN];
@@ -322,7 +323,7 @@ esw_fdb_set_vport_allmulti_rule(struct mlx5_eswitch *esw, u32 vport)
 	return __esw_fdb_set_vport_rule(esw, vport, false, mac_c, mac_v);
 }
 
-static struct mlx5_flow_rule *
+static struct mlx5_flow_handle *
 esw_fdb_set_vport_promisc_rule(struct mlx5_eswitch *esw, u32 vport)
 {
 	u8 mac_c[ETH_ALEN];
@@ -361,7 +362,7 @@ static int esw_create_legacy_fdb_table(struct mlx5_eswitch *esw, int nvports)
 	memset(flow_group_in, 0, inlen);
 
 	table_size = BIT(MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
-	fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0);
+	fdb = mlx5_create_flow_table(root_ns, 0, table_size, 0, 0);
 	if (IS_ERR(fdb)) {
 		err = PTR_ERR(fdb);
 		esw_warn(dev, "Failed to create FDB Table err %d\n", err);
@@ -515,7 +516,7 @@ static int esw_del_uc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 	del_l2_table_entry(esw->dev, esw_uc->table_index);
 
 	if (vaddr->flow_rule)
-		mlx5_del_flow_rule(vaddr->flow_rule);
+		mlx5_del_flow_rules(vaddr->flow_rule);
 	vaddr->flow_rule = NULL;
 
 	l2addr_hash_del(esw_uc);
@@ -562,7 +563,7 @@ static void update_allmulti_vports(struct mlx5_eswitch *esw,
 		case MLX5_ACTION_DEL:
 			if (!iter_vaddr)
 				continue;
-			mlx5_del_flow_rule(iter_vaddr->flow_rule);
+			mlx5_del_flow_rules(iter_vaddr->flow_rule);
 			l2addr_hash_del(iter_vaddr);
 			break;
 		}
@@ -632,7 +633,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 		  esw_mc->uplink_rule);
 
 	if (vaddr->flow_rule)
-		mlx5_del_flow_rule(vaddr->flow_rule);
+		mlx5_del_flow_rules(vaddr->flow_rule);
 	vaddr->flow_rule = NULL;
 
 	/* If the multicast mac is added as a result of mc promiscuous vport,
@@ -645,7 +646,7 @@ static int esw_del_mc_addr(struct mlx5_eswitch *esw, struct vport_addr *vaddr)
 	update_allmulti_vports(esw, vaddr, esw_mc);
 
 	if (esw_mc->uplink_rule)
-		mlx5_del_flow_rule(esw_mc->uplink_rule);
+		mlx5_del_flow_rules(esw_mc->uplink_rule);
 
 	l2addr_hash_del(esw_mc);
 	return 0;
@@ -828,14 +829,14 @@ static void esw_apply_vport_rx_mode(struct mlx5_eswitch *esw, u32 vport_num,
 								UPLINK_VPORT);
 		allmulti_addr->refcnt++;
 	} else if (vport->allmulti_rule) {
-		mlx5_del_flow_rule(vport->allmulti_rule);
+		mlx5_del_flow_rules(vport->allmulti_rule);
 		vport->allmulti_rule = NULL;
 
 		if (--allmulti_addr->refcnt > 0)
 			goto promisc;
 
 		if (allmulti_addr->uplink_rule)
-			mlx5_del_flow_rule(allmulti_addr->uplink_rule);
+			mlx5_del_flow_rules(allmulti_addr->uplink_rule);
 		allmulti_addr->uplink_rule = NULL;
 	}
 
@@ -847,7 +848,7 @@ promisc:
 		vport->promisc_rule =
 			esw_fdb_set_vport_promisc_rule(esw, vport_num);
 	} else if (vport->promisc_rule) {
-		mlx5_del_flow_rule(vport->promisc_rule);
+		mlx5_del_flow_rules(vport->promisc_rule);
 		vport->promisc_rule = NULL;
 	}
 }
@@ -1018,10 +1019,10 @@ static void esw_vport_cleanup_egress_rules(struct mlx5_eswitch *esw,
 					   struct mlx5_vport *vport)
 {
 	if (!IS_ERR_OR_NULL(vport->egress.allowed_vlan))
-		mlx5_del_flow_rule(vport->egress.allowed_vlan);
+		mlx5_del_flow_rules(vport->egress.allowed_vlan);
 
 	if (!IS_ERR_OR_NULL(vport->egress.drop_rule))
-		mlx5_del_flow_rule(vport->egress.drop_rule);
+		mlx5_del_flow_rules(vport->egress.drop_rule);
 
 	vport->egress.allowed_vlan = NULL;
 	vport->egress.drop_rule = NULL;
@@ -1179,10 +1180,10 @@ static void esw_vport_cleanup_ingress_rules(struct mlx5_eswitch *esw,
 					    struct mlx5_vport *vport)
 {
 	if (!IS_ERR_OR_NULL(vport->ingress.drop_rule))
-		mlx5_del_flow_rule(vport->ingress.drop_rule);
+		mlx5_del_flow_rules(vport->ingress.drop_rule);
 
 	if (!IS_ERR_OR_NULL(vport->ingress.allow_rule))
-		mlx5_del_flow_rule(vport->ingress.allow_rule);
+		mlx5_del_flow_rules(vport->ingress.allow_rule);
 
 	vport->ingress.drop_rule = NULL;
 	vport->ingress.allow_rule = NULL;
@@ -1212,6 +1213,7 @@ static void esw_vport_disable_ingress_acl(struct mlx5_eswitch *esw,
 static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 				    struct mlx5_vport *vport)
 {
+	struct mlx5_flow_act flow_act = {0};
 	struct mlx5_flow_spec *spec;
 	int err = 0;
 	u8 *smac_v;
@@ -1264,10 +1266,10 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 	}
 
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
 	vport->ingress.allow_rule =
-		mlx5_add_flow_rule(vport->ingress.acl, spec,
-				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
-				   0, NULL);
+		mlx5_add_flow_rules(vport->ingress.acl, spec,
+				    &flow_act, NULL, 0);
 	if (IS_ERR(vport->ingress.allow_rule)) {
 		err = PTR_ERR(vport->ingress.allow_rule);
 		esw_warn(esw->dev,
@@ -1278,10 +1280,10 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
 	}
 
 	memset(spec, 0, sizeof(*spec));
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
 	vport->ingress.drop_rule =
-		mlx5_add_flow_rule(vport->ingress.acl, spec,
-				   MLX5_FLOW_CONTEXT_ACTION_DROP,
-				   0, NULL);
+		mlx5_add_flow_rules(vport->ingress.acl, spec,
+				    &flow_act, NULL, 0);
 	if (IS_ERR(vport->ingress.drop_rule)) {
 		err = PTR_ERR(vport->ingress.drop_rule);
 		esw_warn(esw->dev,
@@ -1301,6 +1303,7 @@ out:
 static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 				   struct mlx5_vport *vport)
 {
+	struct mlx5_flow_act flow_act = {0};
 	struct mlx5_flow_spec *spec;
 	int err = 0;
 
@@ -1338,10 +1341,10 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 	MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
 
 	spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_ALLOW;
 	vport->egress.allowed_vlan =
-		mlx5_add_flow_rule(vport->egress.acl, spec,
-				   MLX5_FLOW_CONTEXT_ACTION_ALLOW,
-				   0, NULL);
+		mlx5_add_flow_rules(vport->egress.acl, spec,
+				    &flow_act, NULL, 0);
 	if (IS_ERR(vport->egress.allowed_vlan)) {
 		err = PTR_ERR(vport->egress.allowed_vlan);
 		esw_warn(esw->dev,
@@ -1353,10 +1356,10 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
 
 	/* Drop others rule (star rule) */
 	memset(spec, 0, sizeof(*spec));
+	flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP;
 	vport->egress.drop_rule =
-		mlx5_add_flow_rule(vport->egress.acl, spec,
-				   MLX5_FLOW_CONTEXT_ACTION_DROP,
-				   0, NULL);
+		mlx5_add_flow_rules(vport->egress.acl, spec,
+				    &flow_act, NULL, 0);
 	if (IS_ERR(vport->egress.drop_rule)) {
 		err = PTR_ERR(vport->egress.drop_rule);
 		esw_warn(esw->dev,
@@ -1369,6 +1372,147 @@ out:
 	return err;
 }
 
+/* Vport QoS management */
+static int esw_create_tsar(struct mlx5_eswitch *esw)
+{
+	u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+	struct mlx5_core_dev *dev = esw->dev;
+	int err;
+
+	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+		return 0;
+
+	if (esw->qos.enabled)
+		return -EEXIST;
+
+	err = mlx5_create_scheduling_element_cmd(dev,
+						 SCHEDULING_HIERARCHY_E_SWITCH,
+						 &tsar_ctx,
+						 &esw->qos.root_tsar_id);
+	if (err) {
+		esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err);
+		return err;
+	}
+
+	esw->qos.enabled = true;
+	return 0;
+}
+
+static void esw_destroy_tsar(struct mlx5_eswitch *esw)
+{
+	int err;
+
+	if (!esw->qos.enabled)
+		return;
+
+	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
+						  SCHEDULING_HIERARCHY_E_SWITCH,
+						  esw->qos.root_tsar_id);
+	if (err)
+		esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err);
+
+	esw->qos.enabled = false;
+}
+
+static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
+				u32 initial_max_rate)
+{
+	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+	struct mlx5_core_dev *dev = esw->dev;
+	void *vport_elem;
+	int err = 0;
+
+	if (!esw->qos.enabled || !MLX5_CAP_GEN(dev, qos) ||
+	    !MLX5_CAP_QOS(dev, esw_scheduling))
+		return 0;
+
+	if (vport->qos.enabled)
+		return -EEXIST;
+
+	MLX5_SET(scheduling_context, &sched_ctx, element_type,
+		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+				  element_attributes);
+	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+		 esw->qos.root_tsar_id);
+	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+		 initial_max_rate);
+
+	err = mlx5_create_scheduling_element_cmd(dev,
+						 SCHEDULING_HIERARCHY_E_SWITCH,
+						 &sched_ctx,
+						 &vport->qos.esw_tsar_ix);
+	if (err) {
+		esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n",
+			 vport_num, err);
+		return err;
+	}
+
+	vport->qos.enabled = true;
+	return 0;
+}
+
+static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
+{
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+	int err = 0;
+
+	if (!vport->qos.enabled)
+		return;
+
+	err = mlx5_destroy_scheduling_element_cmd(esw->dev,
+						  SCHEDULING_HIERARCHY_E_SWITCH,
+						  vport->qos.esw_tsar_ix);
+	if (err)
+		esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n",
+			 vport_num, err);
+
+	vport->qos.enabled = false;
+}
+
+static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
+				u32 max_rate)
+{
+	u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
+	struct mlx5_vport *vport = &esw->vports[vport_num];
+	struct mlx5_core_dev *dev = esw->dev;
+	void *vport_elem;
+	u32 bitmask = 0;
+	int err = 0;
+
+	if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling))
+		return -EOPNOTSUPP;
+
+	if (!vport->qos.enabled)
+		return -EIO;
+
+	MLX5_SET(scheduling_context, &sched_ctx, element_type,
+		 SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT);
+	vport_elem = MLX5_ADDR_OF(scheduling_context, &sched_ctx,
+				  element_attributes);
+	MLX5_SET(vport_element, vport_elem, vport_number, vport_num);
+	MLX5_SET(scheduling_context, &sched_ctx, parent_element_id,
+		 esw->qos.root_tsar_id);
+	MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
+		 max_rate);
+	bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+
+	err = mlx5_modify_scheduling_element_cmd(dev,
+						 SCHEDULING_HIERARCHY_E_SWITCH,
+						 &sched_ctx,
+						 vport->qos.esw_tsar_ix,
+						 bitmask);
+	if (err) {
+		esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n",
+			 vport_num, err);
+		return err;
+	}
+
+	return 0;
+}
+
 static void node_guid_gen_from_mac(u64 *node_guid, u8 mac[ETH_ALEN])
 {
 	((u8 *)node_guid)[7] = mac[0];
@@ -1404,6 +1548,7 @@ static void esw_apply_vport_conf(struct mlx5_eswitch *esw,
 		esw_vport_egress_config(esw, vport);
 	}
 }
+
 static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 			     int enable_events)
 {
@@ -1417,6 +1562,10 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
 	/* Restore old vport configuration */
 	esw_apply_vport_conf(esw, vport);
 
+	/* Attach vport to the eswitch rate limiter */
+	if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate))
+		esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
+
 	/* Sync with current vport context */
 	vport->enabled_events = enable_events;
 	vport->enabled = true;
@@ -1455,7 +1604,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
 	 */
 	esw_vport_change_handle_locked(vport);
 	vport->enabled_events = 0;
-
+	esw_vport_disable_qos(esw, vport_num);
 	if (vport_num && esw->mode == SRIOV_LEGACY) {
 		mlx5_modify_vport_admin_state(esw->dev,
 					      MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT,
@@ -1501,6 +1650,10 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
 	if (err)
 		goto abort;
 
+	err = esw_create_tsar(esw);
+	if (err)
+		esw_warn(esw->dev, "Failed to create eswitch TSAR");
+
 	enabled_events = (mode == SRIOV_LEGACY) ? SRIOV_VPORT_EVENTS : UC_ADDR_CHANGE;
 	for (i = 0; i <= nvfs; i++)
 		esw_enable_vport(esw, i, enabled_events);
@@ -1535,7 +1688,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw)
 		esw_disable_vport(esw, i);
 
 	if (mc_promisc && mc_promisc->uplink_rule)
-		mlx5_del_flow_rule(mc_promisc->uplink_rule);
+		mlx5_del_flow_rules(mc_promisc->uplink_rule);
+
+	esw_destroy_tsar(esw);
 
 	if (esw->mode == SRIOV_LEGACY)
 		esw_destroy_legacy_fdb_table(esw);
@@ -1627,6 +1782,7 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev)
 		goto abort;
 	}
 
+	hash_init(esw->offloads.encap_tbl);
 	mutex_init(&esw->state_lock);
 
 	for (vport_num = 0; vport_num < total_vports; vport_num++) {
@@ -1642,6 +1798,7 @@
 	esw->total_vports = total_vports;
 	esw->enabled_vports = 0;
 	esw->mode = SRIOV_NONE;
+	esw->offloads.inline_mode = MLX5_INLINE_MODE_NONE;
 
 	dev->priv.eswitch = esw;
 	return 0;
@@ -1795,6 +1952,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
 	ivi->qos = evport->info.qos;
 	ivi->spoofchk = evport->info.spoofchk;
 	ivi->trusted = evport->info.trusted;
+	ivi->max_tx_rate = evport->info.max_rate;
 	mutex_unlock(&esw->state_lock);

 	return 0;
@@ -1888,6 +2046,27 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
 	return 0;
 }
 
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
+				int vport, u32 max_rate)
+{
+	struct mlx5_vport *evport;
+	int err = 0;
+
+	if (!ESW_ALLOWED(esw))
+		return -EPERM;
+	if (!LEGAL_VPORT(esw, vport))
+		return -EINVAL;
+
+	mutex_lock(&esw->state_lock);
+	evport = &esw->vports[vport];
+	err = esw_vport_qos_config(esw, vport, max_rate);
+	if (!err)
+		evport->info.max_rate = max_rate;
+
+	mutex_unlock(&esw->state_lock);
+	return err;
+}
+
 int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
 				 int vport,
 				 struct ifla_vf_stats *vf_stats)
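The other half of this patch is the vport QoS path: esw_create_tsar() sets up a root transmit scheduling arbiter (TSAR) when SR-IOV is enabled, each enabled vport gets a vport scheduling element attached under it, and the new exported mlx5_eswitch_set_vport_rate() updates that element's max_average_bw. The netdev-side caller is not part of this file; below is a sketch of how the export would typically be reached from a .ndo_set_vf_rate callback, assuming an mlx5e PF netdev. The function name is hypothetical, the vf + 1 mapping (vport 0 being the PF) is an assumption about the caller, and how max_tx_rate (Mb/s per the ndo contract) maps onto the firmware's max_average_bw units is not shown by this diff.

	/* Sketch only -- not part of this patch. */
	static int example_ndo_set_vf_rate(struct net_device *dev, int vf,
					   int min_tx_rate, int max_tx_rate)
	{
		struct mlx5e_priv *priv = netdev_priv(dev);
		struct mlx5_core_dev *mdev = priv->mdev;

		/* only a max-rate cap is plumbed through esw_vport_qos_config() */
		if (min_tx_rate)
			return -EOPNOTSUPP;

		/* assume VF n owns vport n + 1; vport 0 is the PF */
		return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
						   max_tx_rate);
	}

Note that mlx5_eswitch_set_vport_rate() takes esw->state_lock, so it can be called directly from the ndo path; it returns -EIO if the vport was enabled before the TSAR existed (esw_vport_enable_qos() never ran) and -EOPNOTSUPP if the device lacks the esw_scheduling QoS capability.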