path: root/drivers/net/ethernet/mellanox/mlx5/core
author    Linus Torvalds <torvalds@linux-foundation.org>  2017-11-15 11:56:19 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2017-11-15 11:56:19 -0800
commit    5bbcc0f595fadb4cac0eddc4401035ec0bd95b09 (patch)
tree      3b65e490cc36a6c6fecac1fa24d9e0ac9ced4455 /drivers/net/ethernet/mellanox/mlx5/core
parent    892204e06cb9e89fbc4b299a678f9ca358e97cac (diff)
parent    50895b9de1d3e0258e015e8e55128d835d9a9f19 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
 "Highlights:

  1) Maintain the TCP retransmit queue using an rbtree, with 1GB windows at 100Gb this really has become necessary. From Eric Dumazet.
  2) Multi-program support for cgroup+bpf, from Alexei Starovoitov.
  3) Perform broadcast flooding in hardware in mv88e6xxx, from Andrew Lunn.
  4) Add meter action support to openvswitch, from Andy Zhou.
  5) Add a data meta pointer for BPF accessible packets, from Daniel Borkmann.
  6) Namespace-ify almost all TCP sysctl knobs, from Eric Dumazet.
  7) Turn on Broadcom Tags in b53 driver, from Florian Fainelli.
  8) More work to move the RTNL mutex down, from Florian Westphal.
  9) Add 'bpftool' utility, to help with bpf program introspection. From Jakub Kicinski.
 10) Add new 'cpumap' type for XDP_REDIRECT action, from Jesper Dangaard Brouer.
 11) Support 'blocks' of transformations in the packet scheduler which can span multiple network devices, from Jiri Pirko.
 12) TC flower offload support in cxgb4, from Kumar Sanghvi.
 13) Priority based stream scheduler for SCTP, from Marcelo Ricardo Leitner.
 14) Thunderbolt networking driver, from Amir Levy and Mika Westerberg.
 15) Add RED qdisc offloadability, and use it in mlxsw driver. From Nogah Frankel.
 16) eBPF based device controller for cgroup v2, from Roman Gushchin.
 17) Add some fundamental tracepoints for TCP, from Song Liu.
 18) Remove garbage collection from ipv6 route layer, this is a significant accomplishment. From Wei Wang.
 19) Add multicast route offload support to mlxsw, from Yotam Gigi"

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (2177 commits)
  tcp: highest_sack fix
  geneve: fix fill_info when link down
  bpf: fix lockdep splat
  net: cdc_ncm: GetNtbFormat endian fix
  openvswitch: meter: fix NULL pointer dereference in ovs_meter_cmd_reply_start
  netem: remove unnecessary 64 bit modulus
  netem: use 64 bit divide by rate
  tcp: Namespace-ify sysctl_tcp_default_congestion_control
  net: Protect iterations over net::fib_notifier_ops in fib_seq_sum()
  ipv6: set all.accept_dad to 0 by default
  uapi: fix linux/tls.h userspace compilation error
  usbnet: ipheth: prevent TX queue timeouts when device not ready
  vhost_net: conditionally enable tx polling
  uapi: fix linux/rxrpc.h userspace compilation errors
  net: stmmac: fix LPI transitioning for dwmac4
  atm: horizon: Fix irq release error
  net-sysfs: trigger netlink notification on ifalias change via sysfs
  openvswitch: Using kfree_rcu() to simplify the code
  openvswitch: Make local function ovs_nsh_key_attr_size() static
  openvswitch: Fix return value check in ovs_meter_cmd_features()
  ...
Diffstat (limited to 'drivers/net/ethernet/mellanox/mlx5/core')
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 91
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_clock.c | 619
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 265
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 315
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c | 171
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 244
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 74
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 59
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.c | 899
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 291
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 34
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 998
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.h | 21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c | 272
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c | 350
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 525
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h | 51
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 111
37 files changed, 3765 insertions, 1791 deletions
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index fdaef00..25deaa5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -6,6 +6,7 @@ config MLX5_CORE
tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
depends on MAY_USE_DEVLINK
depends on PCI
+ imply PTP_1588_CLOCK
default n
---help---
Core driver for low level functionality of the ConnectX-4 and
@@ -29,7 +30,6 @@ config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && INET && PCI && MLX5_CORE
depends on IPV6=y || IPV6=n || MLX5_CORE=m
- imply PTP_1588_CLOCK
default n
---help---
Ethernet support in Mellanox Technologies ConnectX-4 NIC.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 714dd0d..19b21b4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -5,7 +5,7 @@ subdir-ccflags-y += -I$(src)
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
- fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \
+ fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o lib/clock.o \
diag/fs_tracepoint.o
mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o
@@ -14,7 +14,7 @@ mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
fpga/ipsec.o
mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \
- en_tx.o en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
+ en_tx.o en_rx.o en_rx_am.o en_txrx.o en_stats.o vxlan.o \
en_arfs.o en_fs_ethtool.o en_selftest.o
mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o
@@ -23,7 +23,7 @@ mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o en_rep.o en_tc.
mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o
-mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o
+mlx5_core-$(CONFIG_MLX5_CORE_IPOIB) += ipoib/ipoib.o ipoib/ethtool.o ipoib/ipoib_vlan.o
mlx5_core-$(CONFIG_MLX5_EN_IPSEC) += en_accel/ipsec.o en_accel/ipsec_rxtx.o \
en_accel/ipsec_stats.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index 336d473..1016e05 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -58,7 +58,7 @@ void mlx5_cq_tasklet_cb(unsigned long data)
tasklet_ctx.list) {
list_del_init(&mcq->tasklet_ctx.list);
mcq->tasklet_ctx.comp(mcq);
- if (atomic_dec_and_test(&mcq->refcount))
+ if (refcount_dec_and_test(&mcq->refcount))
complete(&mcq->free);
if (time_after(jiffies, end))
break;
@@ -80,7 +80,7 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq)
* still arrive.
*/
if (list_empty_careful(&cq->tasklet_ctx.list)) {
- atomic_inc(&cq->refcount);
+ refcount_inc(&cq->refcount);
list_add_tail(&cq->tasklet_ctx.list, &tasklet_ctx->list);
}
spin_unlock_irqrestore(&tasklet_ctx->lock, flags);
@@ -94,7 +94,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
spin_lock(&table->lock);
cq = radix_tree_lookup(&table->tree, cqn);
if (likely(cq))
- atomic_inc(&cq->refcount);
+ refcount_inc(&cq->refcount);
spin_unlock(&table->lock);
if (!cq) {
@@ -106,7 +106,7 @@ void mlx5_cq_completion(struct mlx5_core_dev *dev, u32 cqn)
cq->comp(cq);
- if (atomic_dec_and_test(&cq->refcount))
+ if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
}
@@ -119,7 +119,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
cq = radix_tree_lookup(&table->tree, cqn);
if (cq)
- atomic_inc(&cq->refcount);
+ refcount_inc(&cq->refcount);
spin_unlock(&table->lock);
@@ -130,7 +130,7 @@ void mlx5_cq_event(struct mlx5_core_dev *dev, u32 cqn, int event_type)
cq->event(cq, event_type);
- if (atomic_dec_and_test(&cq->refcount))
+ if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
}
@@ -159,7 +159,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
cq->cqn = MLX5_GET(create_cq_out, out, cqn);
cq->cons_index = 0;
cq->arm_sn = 0;
- atomic_set(&cq->refcount, 1);
+ refcount_set(&cq->refcount, 1);
init_completion(&cq->free);
if (!cq->comp)
cq->comp = mlx5_add_cq_to_tasklet;
@@ -222,7 +222,7 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq)
synchronize_irq(cq->irqn);
mlx5_debug_cq_remove(dev, cq);
- if (atomic_dec_and_test(&cq->refcount))
+ if (refcount_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
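
Note: the cq.c hunks above are a mechanical atomic_t -> refcount_t conversion; refcount_t saturates and warns on overflow/underflow instead of silently wrapping, which is why it is preferred for object lifetimes. A minimal sketch of the pattern, independent of mlx5 (the struct and function names here are illustrative only, not part of this patch):

#include <linux/refcount.h>
#include <linux/completion.h>

struct obj {
	refcount_t refcount;
	struct completion free;
};

static void obj_init(struct obj *o)
{
	refcount_set(&o->refcount, 1);		/* creator holds the first reference */
	init_completion(&o->free);
}

static void obj_get(struct obj *o)
{
	refcount_inc(&o->refcount);		/* WARNs if the count was already zero */
}

static void obj_put(struct obj *o)
{
	if (refcount_dec_and_test(&o->refcount))
		complete(&o->free);		/* last reference wakes the waiter doing destroy */
}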
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 13b5ef9..c0872b3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -57,6 +57,7 @@
#define MLX5E_HW2SW_MTU(priv, hwmtu) ((hwmtu) - ((priv)->hard_mtu))
#define MLX5E_SW2HW_MTU(priv, swmtu) ((swmtu) + ((priv)->hard_mtu))
+#define MLX5E_MAX_DSCP 64
#define MLX5E_MAX_NUM_TC 8
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
@@ -105,6 +106,7 @@
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE 0x3
#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE 0x10
#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2
@@ -126,6 +128,16 @@
#define MLX5E_NUM_MAIN_GROUPS 9
+#define MLX5E_MSG_LEVEL NETIF_MSG_LINK
+
+#define mlx5e_dbg(mlevel, priv, format, ...) \
+do { \
+ if (NETIF_MSG_##mlevel & (priv)->msglevel) \
+ netdev_warn(priv->netdev, format, \
+ ##__VA_ARGS__); \
+} while (0)
+
+
static inline u16 mlx5_min_rx_wqes(int wq_type, u32 wq_size)
{
switch (wq_type) {
@@ -187,12 +199,14 @@ extern const char mlx5e_self_tests[][ETH_GSTRING_LEN];
static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = {
"rx_cqe_moder",
+ "tx_cqe_moder",
"rx_cqe_compress",
};
enum mlx5e_priv_flag {
MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0),
- MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 1),
+ MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1),
+ MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2),
};
#define MLX5E_SET_PFLAG(params, pflag, enable) \
@@ -212,6 +226,7 @@ enum mlx5e_priv_flag {
struct mlx5e_cq_moder {
u16 usec;
u16 pkts;
+ u8 cq_period_mode;
};
struct mlx5e_params {
@@ -223,7 +238,6 @@ struct mlx5e_params {
u8 log_rq_size;
u16 num_channels;
u8 num_tc;
- u8 rx_cq_period_mode;
bool rx_cqe_compress_def;
struct mlx5e_cq_moder rx_cq_moderation;
struct mlx5e_cq_moder tx_cq_moderation;
@@ -260,34 +274,18 @@ enum {
struct mlx5e_dcbx {
enum mlx5_dcbx_oper_mode mode;
struct mlx5e_cee_config cee_cfg; /* pending configuration */
+ u8 dscp_app_cnt;
/* The only setting that cannot be read from FW */
u8 tc_tsa[IEEE_8021QAZ_MAX_TCS];
u8 cap;
};
-#endif
-#define MAX_PIN_NUM 8
-struct mlx5e_pps {
- u8 pin_caps[MAX_PIN_NUM];
- struct work_struct out_work;
- u64 start[MAX_PIN_NUM];
- u8 enabled;
-};
-
-struct mlx5e_tstamp {
- rwlock_t lock;
- struct cyclecounter cycles;
- struct timecounter clock;
- struct hwtstamp_config hwtstamp_config;
- u32 nominal_c_mult;
- unsigned long overflow_period;
- struct delayed_work overflow_work;
- struct mlx5_core_dev *mdev;
- struct ptp_clock *ptp;
- struct ptp_clock_info ptp_info;
- struct mlx5e_pps pps_info;
+struct mlx5e_dcbx_dp {
+ u8 dscp2prio[MLX5E_MAX_DSCP];
+ u8 trust_state;
};
+#endif
enum {
MLX5E_RQ_STATE_ENABLED,
@@ -375,9 +373,10 @@ struct mlx5e_txqsq {
u8 min_inline_mode;
u16 edge;
struct device *pdev;
- struct mlx5e_tstamp *tstamp;
__be32 mkey_be;
unsigned long state;
+ struct hwtstamp_config *tstamp;
+ struct mlx5_clock *clock;
/* control path */
struct mlx5_wq_ctrl wq_ctrl;
@@ -543,10 +542,11 @@ struct mlx5e_rq {
struct mlx5e_channel *channel;
struct device *pdev;
struct net_device *netdev;
- struct mlx5e_tstamp *tstamp;
struct mlx5e_rq_stats stats;
struct mlx5e_cq cq;
struct mlx5e_page_cache page_cache;
+ struct hwtstamp_config *tstamp;
+ struct mlx5_clock *clock;
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_post_rx_wqes post_wqes;
@@ -588,7 +588,7 @@ struct mlx5e_channel {
/* control */
struct mlx5e_priv *priv;
struct mlx5_core_dev *mdev;
- struct mlx5e_tstamp *tstamp;
+ struct hwtstamp_config *tstamp;
int ix;
};
@@ -655,12 +655,14 @@ struct mlx5e_tc_table {
struct mlx5e_vlan_table {
struct mlx5e_flow_table ft;
- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
- struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID];
+ DECLARE_BITMAP(active_cvlans, VLAN_N_VID);
+ DECLARE_BITMAP(active_svlans, VLAN_N_VID);
+ struct mlx5_flow_handle *active_cvlans_rule[VLAN_N_VID];
+ struct mlx5_flow_handle *active_svlans_rule[VLAN_N_VID];
struct mlx5_flow_handle *untagged_rule;
struct mlx5_flow_handle *any_cvlan_rule;
struct mlx5_flow_handle *any_svlan_rule;
- bool filter_disabled;
+ bool cvlan_filter_disabled;
};
struct mlx5e_l2_table {
@@ -762,8 +764,12 @@ struct mlx5e_priv {
/* priv data path fields - start */
struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC];
int channel_tc2txq[MLX5E_MAX_NUM_CHANNELS][MLX5E_MAX_NUM_TC];
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ struct mlx5e_dcbx_dp dcbx_dp;
+#endif
/* priv data path fields - end */
+ u32 msglevel;
unsigned long state;
struct mutex state_lock; /* Protects Interface state */
struct mlx5e_rq drop_rq;
@@ -789,7 +795,7 @@ struct mlx5e_priv {
struct mlx5_core_dev *mdev;
struct net_device *netdev;
struct mlx5e_stats stats;
- struct mlx5e_tstamp tstamp;
+ struct hwtstamp_config tstamp;
u16 q_counter;
#ifdef CONFIG_MLX5_CORE_EN_DCB
struct mlx5e_dcbx dcbx;
@@ -820,6 +826,8 @@ struct mlx5e_profile {
mlx5e_fp_handle_rx_cqe handle_rx_cqe;
mlx5e_fp_handle_rx_cqe handle_rx_cqe_mpwqe;
} rx_handlers;
+ void (*netdev_registered_init)(struct mlx5e_priv *priv);
+ void (*netdev_registered_remove)(struct mlx5e_priv *priv);
int max_tc;
};
@@ -873,12 +881,6 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv);
void mlx5e_ethtool_cleanup_steering(struct mlx5e_priv *priv);
void mlx5e_set_rx_mode_work(struct work_struct *work);
-void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
- struct skb_shared_hwtstamps *hwts);
-void mlx5e_timestamp_init(struct mlx5e_priv *priv);
-void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
-void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
- struct ptp_clock_event *event);
int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr);
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
@@ -887,8 +889,9 @@ int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
-void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
-void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv);
+void mlx5e_timestamp_set(struct mlx5e_priv *priv);
struct mlx5e_redirect_rqt_param {
bool is_rss;
@@ -928,6 +931,8 @@ void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
int num_channels);
int mlx5e_get_max_linkspeed(struct mlx5_core_dev *mdev, u32 *speed);
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params,
+ u8 cq_period_mode);
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params,
u8 cq_period_mode);
void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
@@ -993,6 +998,8 @@ extern const struct ethtool_ops mlx5e_ethtool_ops;
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets);
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv);
+void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv);
+void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv);
#endif
#ifndef CONFIG_RFS_ACCEL
@@ -1045,6 +1052,9 @@ void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt);
int mlx5e_create_ttc_table(struct mlx5e_priv *priv);
void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv);
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv);
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv);
+
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
u32 underlay_qpn, u32 *tisn);
void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
@@ -1081,6 +1091,9 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv,
struct ethtool_flash *flash);
+int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv);
+
/* mlx5e generic netdev management API */
struct net_device*
mlx5e_create_netdev(struct mlx5_core_dev *mdev, const struct mlx5e_profile *profile,
@@ -1091,5 +1104,5 @@ void mlx5e_destroy_netdev(struct mlx5e_priv *priv);
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
struct mlx5e_params *params,
u16 max_channels);
-
+u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev);
#endif /* __MLX5_EN_H__ */
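
Note: the new mlx5e_dbg() macro above gates driver messages on priv->msglevel, which the ethtool msglevel get/set hooks added later in en_ethtool.c expose to userspace. A minimal call-site sketch, with the function name, message text and trust_state argument purely illustrative (real call sites appear in the en_dcbnl.c hunks below):

static void example_log_trust_state(struct mlx5e_priv *priv, u8 trust_state)
{
	/* enable hardware-configuration messages in addition to the default NETIF_MSG_LINK */
	priv->msglevel |= NETIF_MSG_HW;

	/* emitted via netdev_warn() only when NETIF_MSG_HW is set in priv->msglevel */
	mlx5e_dbg(HW, priv, "%s: trust state %d\n", __func__, trust_state);
}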
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 4614ddf..6a7c8b0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -256,7 +256,7 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev,
goto drop;
}
mdata = mlx5e_ipsec_add_metadata(skb);
- if (unlikely(IS_ERR(mdata))) {
+ if (IS_ERR(mdata)) {
atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_metadata);
goto drop;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 12d3ced..610d485 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -92,7 +92,7 @@ static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type)
static int arfs_disable(struct mlx5e_priv *priv)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5e_tir *tir = priv->indir_tir;
int err = 0;
int tt;
@@ -126,7 +126,7 @@ int mlx5e_arfs_disable(struct mlx5e_priv *priv)
int mlx5e_arfs_enable(struct mlx5e_priv *priv)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
int err = 0;
int tt;
int i;
@@ -175,7 +175,7 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv,
{
struct arfs_table *arfs_t = &priv->fs.arfs.arfs_tables[type];
struct mlx5e_tir *tir = priv->indir_tir;
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
enum mlx5e_traffic_types tt;
@@ -466,7 +466,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv,
struct mlx5e_arfs_tables *arfs = &priv->fs.arfs;
struct arfs_tuple *tuple = &arfs_rule->tuple;
struct mlx5_flow_handle *rule = NULL;
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct arfs_table *arfs_table;
struct mlx5_flow_spec *spec;
@@ -557,7 +557,7 @@ out:
static void arfs_modify_rule_rq(struct mlx5e_priv *priv,
struct mlx5_flow_handle *rule, u16 rxq)
{
- struct mlx5_flow_destination dst;
+ struct mlx5_flow_destination dst = {};
int err = 0;
dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
deleted file mode 100644
index 84dd63e..0000000
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ /dev/null
@@ -1,619 +0,0 @@
-/*
- * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#include <linux/clocksource.h>
-#include "en.h"
-
-enum {
- MLX5E_CYCLES_SHIFT = 23
-};
-
-enum {
- MLX5E_PIN_MODE_IN = 0x0,
- MLX5E_PIN_MODE_OUT = 0x1,
-};
-
-enum {
- MLX5E_OUT_PATTERN_PULSE = 0x0,
- MLX5E_OUT_PATTERN_PERIODIC = 0x1,
-};
-
-enum {
- MLX5E_EVENT_MODE_DISABLE = 0x0,
- MLX5E_EVENT_MODE_REPETETIVE = 0x1,
- MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2,
-};
-
-enum {
- MLX5E_MTPPS_FS_ENABLE = BIT(0x0),
- MLX5E_MTPPS_FS_PATTERN = BIT(0x2),
- MLX5E_MTPPS_FS_PIN_MODE = BIT(0x3),
- MLX5E_MTPPS_FS_TIME_STAMP = BIT(0x4),
- MLX5E_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
- MLX5E_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
-};
-
-void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
- struct skb_shared_hwtstamps *hwts)
-{
- u64 nsec;
-
- read_lock(&tstamp->lock);
- nsec = timecounter_cyc2time(&tstamp->clock, timestamp);
- read_unlock(&tstamp->lock);
-
- hwts->hwtstamp = ns_to_ktime(nsec);
-}
-
-static u64 mlx5e_read_internal_timer(const struct cyclecounter *cc)
-{
- struct mlx5e_tstamp *tstamp = container_of(cc, struct mlx5e_tstamp,
- cycles);
-
- return mlx5_read_internal_timer(tstamp->mdev) & cc->mask;
-}
-
-static void mlx5e_pps_out(struct work_struct *work)
-{
- struct mlx5e_pps *pps_info = container_of(work, struct mlx5e_pps,
- out_work);
- struct mlx5e_tstamp *tstamp = container_of(pps_info, struct mlx5e_tstamp,
- pps_info);
- u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- unsigned long flags;
- int i;
-
- for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
- u64 tstart;
-
- write_lock_irqsave(&tstamp->lock, flags);
- tstart = tstamp->pps_info.start[i];
- tstamp->pps_info.start[i] = 0;
- write_unlock_irqrestore(&tstamp->lock, flags);
- if (!tstart)
- continue;
-
- MLX5_SET(mtpps_reg, in, pin, i);
- MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
- MLX5_SET(mtpps_reg, in, field_select, MLX5E_MTPPS_FS_TIME_STAMP);
- mlx5_set_mtpps(tstamp->mdev, in, sizeof(in));
- }
-}
-
-static void mlx5e_timestamp_overflow(struct work_struct *work)
-{
- struct delayed_work *dwork = to_delayed_work(work);
- struct mlx5e_tstamp *tstamp = container_of(dwork, struct mlx5e_tstamp,
- overflow_work);
- struct mlx5e_priv *priv = container_of(tstamp, struct mlx5e_priv, tstamp);
- unsigned long flags;
-
- write_lock_irqsave(&tstamp->lock, flags);
- timecounter_read(&tstamp->clock);
- write_unlock_irqrestore(&tstamp->lock, flags);
- queue_delayed_work(priv->wq, &tstamp->overflow_work,
- msecs_to_jiffies(tstamp->overflow_period * 1000));
-}
-
-int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
-{
- struct hwtstamp_config config;
- int err;
-
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
- return -EOPNOTSUPP;
-
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
- /* TX HW timestamp */
- switch (config.tx_type) {
- case HWTSTAMP_TX_OFF:
- case HWTSTAMP_TX_ON:
- break;
- default:
- return -ERANGE;
- }
-
- mutex_lock(&priv->state_lock);
- /* RX HW timestamp */
- switch (config.rx_filter) {
- case HWTSTAMP_FILTER_NONE:
- /* Reset CQE compression to Admin default */
- mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
- break;
- case HWTSTAMP_FILTER_ALL:
- case HWTSTAMP_FILTER_SOME:
- case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
- case HWTSTAMP_FILTER_PTP_V2_EVENT:
- case HWTSTAMP_FILTER_PTP_V2_SYNC:
- case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
- case HWTSTAMP_FILTER_NTP_ALL:
- /* Disable CQE compression */
- netdev_warn(priv->netdev, "Disabling cqe compression");
- err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
- if (err) {
- netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
- mutex_unlock(&priv->state_lock);
- return err;
- }
- config.rx_filter = HWTSTAMP_FILTER_ALL;
- break;
- default:
- mutex_unlock(&priv->state_lock);
- return -ERANGE;
- }
-
- memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
- mutex_unlock(&priv->state_lock);
-
- return copy_to_user(ifr->ifr_data, &config,
- sizeof(config)) ? -EFAULT : 0;
-}
-
-int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
-{
- struct hwtstamp_config *cfg = &priv->tstamp.hwtstamp_config;
-
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
- return -EOPNOTSUPP;
-
- return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
-}
-
-static int mlx5e_ptp_settime(struct ptp_clock_info *ptp,
- const struct timespec64 *ts)
-{
- struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
- ptp_info);
- u64 ns = timespec64_to_ns(ts);
- unsigned long flags;
-
- write_lock_irqsave(&tstamp->lock, flags);
- timecounter_init(&tstamp->clock, &tstamp->cycles, ns);
- write_unlock_irqrestore(&tstamp->lock, flags);
-
- return 0;
-}
-
-static int mlx5e_ptp_gettime(struct ptp_clock_info *ptp,
- struct timespec64 *ts)
-{
- struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
- ptp_info);
- u64 ns;
- unsigned long flags;
-
- write_lock_irqsave(&tstamp->lock, flags);
- ns = timecounter_read(&tstamp->clock);
- write_unlock_irqrestore(&tstamp->lock, flags);
-
- *ts = ns_to_timespec64(ns);
-
- return 0;
-}
-
-static int mlx5e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
-{
- struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
- ptp_info);
- unsigned long flags;
-
- write_lock_irqsave(&tstamp->lock, flags);
- timecounter_adjtime(&tstamp->clock, delta);
- write_unlock_irqrestore(&tstamp->lock, flags);
-
- return 0;
-}
-
-static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
-{
- u64 adj;
- u32 diff;
- unsigned long flags;
- int neg_adj = 0;
- struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
- ptp_info);
-
- if (delta < 0) {
- neg_adj = 1;
- delta = -delta;
- }
-
- adj = tstamp->nominal_c_mult;
- adj *= delta;
- diff = div_u64(adj, 1000000000ULL);
-
- write_lock_irqsave(&tstamp->lock, flags);
- timecounter_read(&tstamp->clock);
- tstamp->cycles.mult = neg_adj ? tstamp->nominal_c_mult - diff :
- tstamp->nominal_c_mult + diff;
- write_unlock_irqrestore(&tstamp->lock, flags);
-
- return 0;
-}
-
-static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int on)
-{
- struct mlx5e_tstamp *tstamp =
- container_of(ptp, struct mlx5e_tstamp, ptp_info);
- struct mlx5e_priv *priv =
- container_of(tstamp, struct mlx5e_priv, tstamp);
- u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- u32 field_select = 0;
- u8 pin_mode = 0;
- u8 pattern = 0;
- int pin = -1;
- int err = 0;
-
- if (!MLX5_PPS_CAP(priv->mdev))
- return -EOPNOTSUPP;
-
- if (rq->extts.index >= tstamp->ptp_info.n_pins)
- return -EINVAL;
-
- if (on) {
- pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
- if (pin < 0)
- return -EBUSY;
- pin_mode = MLX5E_PIN_MODE_IN;
- pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
- field_select = MLX5E_MTPPS_FS_PIN_MODE |
- MLX5E_MTPPS_FS_PATTERN |
- MLX5E_MTPPS_FS_ENABLE;
- } else {
- pin = rq->extts.index;
- field_select = MLX5E_MTPPS_FS_ENABLE;
- }
-
- MLX5_SET(mtpps_reg, in, pin, pin);
- MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
- MLX5_SET(mtpps_reg, in, pattern, pattern);
- MLX5_SET(mtpps_reg, in, enable, on);
- MLX5_SET(mtpps_reg, in, field_select, field_select);
-
- err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
- if (err)
- return err;
-
- return mlx5_set_mtppse(priv->mdev, pin, 0,
- MLX5E_EVENT_MODE_REPETETIVE & on);
-}
-
-static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int on)
-{
- struct mlx5e_tstamp *tstamp =
- container_of(ptp, struct mlx5e_tstamp, ptp_info);
- struct mlx5e_priv *priv =
- container_of(tstamp, struct mlx5e_priv, tstamp);
- u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
- u64 nsec_now, nsec_delta, time_stamp = 0;
- u64 cycles_now, cycles_delta;
- struct timespec64 ts;
- unsigned long flags;
- u32 field_select = 0;
- u8 pin_mode = 0;
- u8 pattern = 0;
- int pin = -1;
- int err = 0;
- s64 ns;
-
- if (!MLX5_PPS_CAP(priv->mdev))
- return -EOPNOTSUPP;
-
- if (rq->perout.index >= tstamp->ptp_info.n_pins)
- return -EINVAL;
-
- if (on) {
- pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT,
- rq->perout.index);
- if (pin < 0)
- return -EBUSY;
-
- pin_mode = MLX5E_PIN_MODE_OUT;
- pattern = MLX5E_OUT_PATTERN_PERIODIC;
- ts.tv_sec = rq->perout.period.sec;
- ts.tv_nsec = rq->perout.period.nsec;
- ns = timespec64_to_ns(&ts);
-
- if ((ns >> 1) != 500000000LL)
- return -EINVAL;
-
- ts.tv_sec = rq->perout.start.sec;
- ts.tv_nsec = rq->perout.start.nsec;
- ns = timespec64_to_ns(&ts);
- cycles_now = mlx5_read_internal_timer(tstamp->mdev);
- write_lock_irqsave(&tstamp->lock, flags);
- nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
- nsec_delta = ns - nsec_now;
- cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
- tstamp->cycles.mult);
- write_unlock_irqrestore(&tstamp->lock, flags);
- time_stamp = cycles_now + cycles_delta;
- field_select = MLX5E_MTPPS_FS_PIN_MODE |
- MLX5E_MTPPS_FS_PATTERN |
- MLX5E_MTPPS_FS_ENABLE |
- MLX5E_MTPPS_FS_TIME_STAMP;
- } else {
- pin = rq->perout.index;
- field_select = MLX5E_MTPPS_FS_ENABLE;
- }
-
- MLX5_SET(mtpps_reg, in, pin, pin);
- MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
- MLX5_SET(mtpps_reg, in, pattern, pattern);
- MLX5_SET(mtpps_reg, in, enable, on);
- MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
- MLX5_SET(mtpps_reg, in, field_select, field_select);
-
- err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
- if (err)
- return err;
-
- return mlx5_set_mtppse(priv->mdev, pin, 0,
- MLX5E_EVENT_MODE_REPETETIVE & on);
-}
-
-static int mlx5e_pps_configure(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int on)
-{
- struct mlx5e_tstamp *tstamp =
- container_of(ptp, struct mlx5e_tstamp, ptp_info);
-
- tstamp->pps_info.enabled = !!on;
- return 0;
-}
-
-static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq,
- int on)
-{
- switch (rq->type) {
- case PTP_CLK_REQ_EXTTS:
- return mlx5e_extts_configure(ptp, rq, on);
- case PTP_CLK_REQ_PEROUT:
- return mlx5e_perout_configure(ptp, rq, on);
- case PTP_CLK_REQ_PPS:
- return mlx5e_pps_configure(ptp, rq, on);
- default:
- return -EOPNOTSUPP;
- }
- return 0;
-}
-
-static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
- enum ptp_pin_function func, unsigned int chan)
-{
- return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
-}
-
-static const struct ptp_clock_info mlx5e_ptp_clock_info = {
- .owner = THIS_MODULE,
- .max_adj = 100000000,
- .n_alarm = 0,
- .n_ext_ts = 0,
- .n_per_out = 0,
- .n_pins = 0,
- .pps = 0,
- .adjfreq = mlx5e_ptp_adjfreq,
- .adjtime = mlx5e_ptp_adjtime,
- .gettime64 = mlx5e_ptp_gettime,
- .settime64 = mlx5e_ptp_settime,
- .enable = NULL,
- .verify = NULL,
-};
-
-static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
-{
- tstamp->hwtstamp_config.tx_type = HWTSTAMP_TX_OFF;
- tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
-}
-
-static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
-{
- int i;
-
- tstamp->ptp_info.pin_config =
- kzalloc(sizeof(*tstamp->ptp_info.pin_config) *
- tstamp->ptp_info.n_pins, GFP_KERNEL);
- if (!tstamp->ptp_info.pin_config)
- return -ENOMEM;
- tstamp->ptp_info.enable = mlx5e_ptp_enable;
- tstamp->ptp_info.verify = mlx5e_ptp_verify;
- tstamp->ptp_info.pps = 1;
-
- for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
- snprintf(tstamp->ptp_info.pin_config[i].name,
- sizeof(tstamp->ptp_info.pin_config[i].name),
- "mlx5_pps%d", i);
- tstamp->ptp_info.pin_config[i].index = i;
- tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE;
- tstamp->ptp_info.pin_config[i].chan = i;
- }
-
- return 0;
-}
-
-static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
- struct mlx5e_tstamp *tstamp)
-{
- u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
-
- mlx5_query_mtpps(priv->mdev, out, sizeof(out));
-
- tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
- cap_number_of_pps_pins);
- tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
- cap_max_num_of_pps_in_pins);
- tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
- cap_max_num_of_pps_out_pins);
-
- tstamp->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
- tstamp->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
- tstamp->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
- tstamp->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
- tstamp->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
- tstamp->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
- tstamp->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
- tstamp->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
-}
-
-void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
- struct ptp_clock_event *event)
-{
- struct net_device *netdev = priv->netdev;
- struct mlx5e_tstamp *tstamp = &priv->tstamp;
- struct timespec64 ts;
- u64 nsec_now, nsec_delta;
- u64 cycles_now, cycles_delta;
- int pin = event->index;
- s64 ns;
- unsigned long flags;
-
- switch (tstamp->ptp_info.pin_config[pin].func) {
- case PTP_PF_EXTTS:
- if (tstamp->pps_info.enabled) {
- event->type = PTP_CLOCK_PPSUSR;
- event->pps_times.ts_real = ns_to_timespec64(event->timestamp);
- } else {
- event->type = PTP_CLOCK_EXTTS;
- }
- ptp_clock_event(tstamp->ptp, event);
- break;
- case PTP_PF_PEROUT:
- mlx5e_ptp_gettime(&tstamp->ptp_info, &ts);
- cycles_now = mlx5_read_internal_timer(tstamp->mdev);
- ts.tv_sec += 1;
- ts.tv_nsec = 0;
- ns = timespec64_to_ns(&ts);
- write_lock_irqsave(&tstamp->lock, flags);
- nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
- nsec_delta = ns - nsec_now;
- cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
- tstamp->cycles.mult);
- tstamp->pps_info.start[pin] = cycles_now + cycles_delta;
- queue_work(priv->wq, &tstamp->pps_info.out_work);
- write_unlock_irqrestore(&tstamp->lock, flags);
- break;
- default:
- netdev_err(netdev, "%s: Unhandled event\n", __func__);
- }
-}
-
-void mlx5e_timestamp_init(struct mlx5e_priv *priv)
-{
- struct mlx5e_tstamp *tstamp = &priv->tstamp;
- u64 ns;
- u64 frac = 0;
- u32 dev_freq;
-
- mlx5e_timestamp_init_config(tstamp);
- dev_freq = MLX5_CAP_GEN(priv->mdev, device_frequency_khz);
- if (!dev_freq) {
- mlx5_core_warn(priv->mdev, "invalid device_frequency_khz, aborting HW clock init\n");
- return;
- }
- rwlock_init(&tstamp->lock);
- tstamp->cycles.read = mlx5e_read_internal_timer;
- tstamp->cycles.shift = MLX5E_CYCLES_SHIFT;
- tstamp->cycles.mult = clocksource_khz2mult(dev_freq,
- tstamp->cycles.shift);
- tstamp->nominal_c_mult = tstamp->cycles.mult;
- tstamp->cycles.mask = CLOCKSOURCE_MASK(41);
- tstamp->mdev = priv->mdev;
-
- timecounter_init(&tstamp->clock, &tstamp->cycles,
- ktime_to_ns(ktime_get_real()));
-
- /* Calculate period in seconds to call the overflow watchdog - to make
- * sure counter is checked at least once every wrap around.
- */
- ns = cyclecounter_cyc2ns(&tstamp->cycles, tstamp->cycles.mask,
- frac, &frac);
- do_div(ns, NSEC_PER_SEC / 2 / HZ);
- tstamp->overflow_period = ns;
-
- INIT_WORK(&tstamp->pps_info.out_work, mlx5e_pps_out);
- INIT_DELAYED_WORK(&tstamp->overflow_work, mlx5e_timestamp_overflow);
- if (tstamp->overflow_period)
- queue_delayed_work(priv->wq, &tstamp->overflow_work, 0);
- else
- mlx5_core_warn(priv->mdev, "invalid overflow period, overflow_work is not scheduled\n");
-
- /* Configure the PHC */
- tstamp->ptp_info = mlx5e_ptp_clock_info;
- snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
-
- /* Initialize 1PPS data structures */
- if (MLX5_PPS_CAP(priv->mdev))
- mlx5e_get_pps_caps(priv, tstamp);
- if (tstamp->ptp_info.n_pins)
- mlx5e_init_pin_config(tstamp);
-
- tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
- &priv->mdev->pdev->dev);
- if (IS_ERR(tstamp->ptp)) {
- mlx5_core_warn(priv->mdev, "ptp_clock_register failed %ld\n",
- PTR_ERR(tstamp->ptp));
- tstamp->ptp = NULL;
- }
-}
-
-void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
-{
- struct mlx5e_tstamp *tstamp = &priv->tstamp;
-
- if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
- return;
-
- if (priv->tstamp.ptp) {
- ptp_clock_unregister(priv->tstamp.ptp);
- priv->tstamp.ptp = NULL;
- }
-
- cancel_work_sync(&tstamp->pps_info.out_work);
- cancel_delayed_work_sync(&tstamp->overflow_work);
- kfree(tstamp->ptp_info.pin_config);
-}
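
Note: the timestamping state deleted above is not dropped from the driver; it moves into the core as a per-device clock shared by all mlx5 interfaces (lib/clock.c and lib/clock.h in the diffstat, whose contents are not shown in this view). A rough sketch of the relocated state, with field names assumed to carry over from the removed mlx5e_tstamp rather than taken from lib/clock.h itself:

struct mlx5_clock {
	rwlock_t		lock;
	struct cyclecounter	cycles;
	struct timecounter	tc;
	u32			nominal_c_mult;
	unsigned long		overflow_period;
	struct delayed_work	overflow_work;
	struct ptp_clock	*ptp;
	struct ptp_clock_info	ptp_info;
	struct mlx5_pps		pps_info;
};

The en.h hunks above reflect the same split: rq/sq now carry a struct mlx5_clock * plus a struct hwtstamp_config *, mlx5e_priv keeps only the hwtstamp_config, and the PTP clock is reached through mdev->clock (see the phc_index hunk in en_ethtool.c below).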
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index ece3fb1..784e282 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -134,6 +134,7 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
mlx5_core_destroy_mkey(mdev, &res->mkey);
mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
mlx5_core_dealloc_pd(mdev, res->pdn);
+ memset(res, 0, sizeof(*res));
}
int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb)
@@ -170,3 +171,15 @@ out:
return err;
}
+
+u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev)
+{
+ u8 min_inline_mode;
+
+ mlx5_query_min_inline(mdev, &min_inline_mode);
+ if (min_inline_mode == MLX5_INLINE_MODE_NONE &&
+ !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
+ min_inline_mode = MLX5_INLINE_MODE_L2;
+
+ return min_inline_mode;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 51c4cc0..c6d90b6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -46,6 +46,13 @@ enum {
MLX5E_LOWEST_PRIO_GROUP = 0,
};
+#define MLX5_DSCP_SUPPORTED(mdev) (MLX5_CAP_GEN(mdev, qcam_reg) && \
+ MLX5_CAP_QCAM_REG(mdev, qpts) && \
+ MLX5_CAP_QCAM_REG(mdev, qpdpm))
+
+static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state);
+static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio);
+
/* If dcbx mode is non-host set the dcbx mode to host.
*/
static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
@@ -234,7 +241,7 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
u8 tc_group[IEEE_8021QAZ_MAX_TCS];
int max_tc = mlx5_max_tc(mdev);
- int err;
+ int err, i;
mlx5e_build_tc_group(ets, tc_group, max_tc);
mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
@@ -253,6 +260,14 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
return err;
memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
+
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ mlx5e_dbg(HW, priv, "%s: prio_%d <=> tc_%d\n",
+ __func__, i, ets->prio_tc[i]);
+ mlx5e_dbg(HW, priv, "%s: tc_%d <=> tx_bw_%d%%, group_%d\n",
+ __func__, i, tc_tx_bw[i], tc_group[i]);
+ }
+
return err;
}
@@ -338,6 +353,11 @@ static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
mlx5_toggle_port_link(mdev);
+ if (!ret) {
+ mlx5e_dbg(HW, priv,
+ "%s: PFC per priority bit mask: 0x%x\n",
+ __func__, pfc->pfc_en);
+ }
return ret;
}
@@ -381,6 +401,113 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return 0;
}
+static int mlx5e_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct dcb_app temp;
+ bool is_new;
+ int err;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
+ return -EINVAL;
+
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+ return -EINVAL;
+
+ if (!MLX5_DSCP_SUPPORTED(priv->mdev))
+ return -EINVAL;
+
+ if (app->protocol >= MLX5E_MAX_DSCP)
+ return -EINVAL;
+
+ /* Save the old entry info */
+ temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ temp.protocol = app->protocol;
+ temp.priority = priv->dcbx_dp.dscp2prio[app->protocol];
+
+ /* Check if need to switch to dscp trust state */
+ if (!priv->dcbx.dscp_app_cnt) {
+ err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_DSCP);
+ if (err)
+ return err;
+ }
+
+ /* Skip the fw command if new and old mapping are the same */
+ if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol]) {
+ err = mlx5e_set_dscp2prio(priv, app->protocol, app->priority);
+ if (err)
+ goto fw_err;
+ }
+
+ /* Delete the old entry if exists */
+ is_new = false;
+ err = dcb_ieee_delapp(dev, &temp);
+ if (err)
+ is_new = true;
+
+ /* Add new entry and update counter */
+ err = dcb_ieee_setapp(dev, app);
+ if (err)
+ return err;
+
+ if (is_new)
+ priv->dcbx.dscp_app_cnt++;
+
+ return err;
+
+fw_err:
+ mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
+ return err;
+}
+
+static int mlx5e_dcbnl_ieee_delapp(struct net_device *dev, struct dcb_app *app)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
+
+ if (app->selector != IEEE_8021QAZ_APP_SEL_DSCP)
+ return -EINVAL;
+
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+ return -EINVAL;
+
+ if (!MLX5_DSCP_SUPPORTED(priv->mdev))
+ return -EINVAL;
+
+ if (app->protocol >= MLX5E_MAX_DSCP)
+ return -EINVAL;
+
+ /* Skip if no dscp app entry */
+ if (!priv->dcbx.dscp_app_cnt)
+ return -ENOENT;
+
+ /* Check if the entry matches fw setting */
+ if (app->priority != priv->dcbx_dp.dscp2prio[app->protocol])
+ return -ENOENT;
+
+ /* Delete the app entry */
+ err = dcb_ieee_delapp(dev, app);
+ if (err)
+ return err;
+
+ /* Reset the priority mapping back to zero */
+ err = mlx5e_set_dscp2prio(priv, app->protocol, 0);
+ if (err)
+ goto fw_err;
+
+ priv->dcbx.dscp_app_cnt--;
+
+ /* Check if need to switch to pcp trust state */
+ if (!priv->dcbx.dscp_app_cnt)
+ err = mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
+
+ return err;
+
+fw_err:
+ mlx5e_set_trust_state(priv, MLX5_QPTS_TRUST_PCP);
+ return err;
+}
+
static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
struct ieee_maxrate *maxrate)
{
@@ -446,6 +573,11 @@ static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
}
}
+ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+ mlx5e_dbg(HW, priv, "%s: tc_%d <=> max_bw %d Gbps\n",
+ __func__, i, max_bw_value[i]);
+ }
+
return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
}
@@ -471,6 +603,10 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
+ mlx5e_dbg(HW, priv,
+ "%s: Priority group %d: tx_bw %d, rx_bw %d, prio_tc %d\n",
+ __func__, i, ets.tc_tx_bw[i], ets.tc_rx_bw[i],
+ ets.prio_tc[i]);
}
err = mlx5e_dbcnl_validate_ets(netdev, &ets);
@@ -740,6 +876,8 @@ const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
.ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
.ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
.ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
+ .ieee_setapp = mlx5e_dcbnl_ieee_setapp,
+ .ieee_delapp = mlx5e_dcbnl_ieee_delapp,
.getdcbx = mlx5e_dcbnl_getdcbx,
.setdcbx = mlx5e_dcbnl_setdcbx,
@@ -801,10 +939,135 @@ static void mlx5e_ets_init(struct mlx5e_priv *priv)
mlx5e_dcbnl_ieee_setets_core(priv, &ets);
}
+enum {
+ INIT,
+ DELETE,
+};
+
+static void mlx5e_dcbnl_dscp_app(struct mlx5e_priv *priv, int action)
+{
+ struct dcb_app temp;
+ int i;
+
+ if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+ return;
+
+ if (!MLX5_DSCP_SUPPORTED(priv->mdev))
+ return;
+
+ /* No SEL_DSCP entry in non DSCP state */
+ if (priv->dcbx_dp.trust_state != MLX5_QPTS_TRUST_DSCP)
+ return;
+
+ temp.selector = IEEE_8021QAZ_APP_SEL_DSCP;
+ for (i = 0; i < MLX5E_MAX_DSCP; i++) {
+ temp.protocol = i;
+ temp.priority = priv->dcbx_dp.dscp2prio[i];
+ if (action == INIT)
+ dcb_ieee_setapp(priv->netdev, &temp);
+ else
+ dcb_ieee_delapp(priv->netdev, &temp);
+ }
+
+ priv->dcbx.dscp_app_cnt = (action == INIT) ? MLX5E_MAX_DSCP : 0;
+}
+
+void mlx5e_dcbnl_init_app(struct mlx5e_priv *priv)
+{
+ mlx5e_dcbnl_dscp_app(priv, INIT);
+}
+
+void mlx5e_dcbnl_delete_app(struct mlx5e_priv *priv)
+{
+ mlx5e_dcbnl_dscp_app(priv, DELETE);
+}
+
+static void mlx5e_trust_update_tx_min_inline_mode(struct mlx5e_priv *priv,
+ struct mlx5e_params *params)
+{
+ params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(priv->mdev);
+ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP &&
+ params->tx_min_inline_mode == MLX5_INLINE_MODE_L2)
+ params->tx_min_inline_mode = MLX5_INLINE_MODE_IP;
+}
+
+static void mlx5e_trust_update_sq_inline_mode(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channels new_channels = {};
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto out;
+
+ new_channels.params = priv->channels.params;
+ mlx5e_trust_update_tx_min_inline_mode(priv, &new_channels.params);
+
+ /* Skip if tx_min_inline is the same */
+ if (new_channels.params.tx_min_inline_mode ==
+ priv->channels.params.tx_min_inline_mode)
+ goto out;
+
+ if (mlx5e_open_channels(priv, &new_channels))
+ goto out;
+ mlx5e_switch_priv_channels(priv, &new_channels, NULL);
+
+out:
+ mutex_unlock(&priv->state_lock);
+}
+
+static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state)
+{
+ int err;
+
+ err = mlx5_set_trust_state(priv->mdev, trust_state);
+ if (err)
+ return err;
+ priv->dcbx_dp.trust_state = trust_state;
+ mlx5e_trust_update_sq_inline_mode(priv);
+
+ return err;
+}
+
+static int mlx5e_set_dscp2prio(struct mlx5e_priv *priv, u8 dscp, u8 prio)
+{
+ int err;
+
+ err = mlx5_set_dscp2prio(priv->mdev, dscp, prio);
+ if (err)
+ return err;
+
+ priv->dcbx_dp.dscp2prio[dscp] = prio;
+ return err;
+}
+
+static int mlx5e_trust_initialize(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int err;
+
+ if (!MLX5_DSCP_SUPPORTED(mdev))
+ return 0;
+
+ err = mlx5_query_trust_state(priv->mdev, &priv->dcbx_dp.trust_state);
+ if (err)
+ return err;
+
+ mlx5e_trust_update_tx_min_inline_mode(priv, &priv->channels.params);
+
+ err = mlx5_query_dscp2prio(priv->mdev, priv->dcbx_dp.dscp2prio);
+ if (err)
+ return err;
+
+ return 0;
+}
+
void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
{
struct mlx5e_dcbx *dcbx = &priv->dcbx;
+ mlx5e_trust_initialize(priv);
+
if (!MLX5_CAP_GEN(priv->mdev, qos))
return;
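
Note: mlx5e_dcbnl_init_app()/mlx5e_dcbnl_delete_app() pair naturally with the netdev_registered_init/netdev_registered_remove profile callbacks added in en.h above; the actual hookup lives in en_main.c (listed in the diffstat, not shown here). A plausible wiring sketch under that assumption, with the other profile members elided:

static const struct mlx5e_profile mlx5e_nic_profile = {
	/* ... existing init/cleanup and rx handlers ... */
#ifdef CONFIG_MLX5_CORE_EN_DCB
	.netdev_registered_init   = mlx5e_dcbnl_init_app,
	.netdev_registered_remove = mlx5e_dcbnl_delete_app,
#endif
	.max_tc			  = MLX5E_MAX_NUM_TC,
};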
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index d12e9fc..23425f0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -31,7 +31,6 @@
*/
#include "en.h"
-#include "en_accel/ipsec.h"
void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
@@ -136,59 +135,15 @@ void mlx5e_build_ptys2ethtool_map(void)
ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT);
}
-static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- u8 pfc_en_tx;
- u8 pfc_en_rx;
- int err;
-
- if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return 0;
-
- err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
-
- return err ? 0 : pfc_en_tx | pfc_en_rx;
-}
-
-static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
-{
- struct mlx5_core_dev *mdev = priv->mdev;
- u32 rx_pause;
- u32 tx_pause;
- int err;
-
- if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
- return false;
-
- err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
-
- return err ? false : rx_pause | tx_pause;
-}
-
-#define MLX5E_NUM_Q_CNTRS(priv) (NUM_Q_COUNTERS * (!!priv->q_counter))
-#define MLX5E_NUM_RQ_STATS(priv) (NUM_RQ_STATS * (priv)->channels.num)
-#define MLX5E_NUM_SQ_STATS(priv) \
- (NUM_SQ_STATS * (priv)->channels.num * (priv)->channels.params.num_tc)
-#define MLX5E_NUM_PFC_COUNTERS(priv) \
- ((mlx5e_query_global_pause_combined(priv) + hweight8(mlx5e_query_pfc_combined(priv))) * \
- NUM_PPORT_PER_PRIO_PFC_COUNTERS)
-
int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset)
{
+ int i, num_stats = 0;
+
switch (sset) {
case ETH_SS_STATS:
- return NUM_SW_COUNTERS +
- MLX5E_NUM_Q_CNTRS(priv) +
- NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) +
- NUM_PCIE_COUNTERS(priv) +
- MLX5E_NUM_RQ_STATS(priv) +
- MLX5E_NUM_SQ_STATS(priv) +
- MLX5E_NUM_PFC_COUNTERS(priv) +
- ARRAY_SIZE(mlx5e_pme_status_desc) +
- ARRAY_SIZE(mlx5e_pme_error_desc) +
- mlx5e_ipsec_get_count(priv);
-
+ for (i = 0; i < mlx5e_num_stats_grps; i++)
+ num_stats += mlx5e_stats_grps[i].get_num_stats(priv);
+ return num_stats;
case ETH_SS_PRIV_FLAGS:
return ARRAY_SIZE(mlx5e_priv_flags);
case ETH_SS_TEST:
@@ -208,104 +163,10 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, u8 *data)
{
- int i, j, tc, prio, idx = 0;
- unsigned long pfc_combined;
-
- /* SW counters */
- for (i = 0; i < NUM_SW_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
-
- /* Q counters */
- for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
-
- /* VPORT counters */
- for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- vport_stats_desc[i].format);
-
- /* PPORT counters */
- for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_802_3_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2863_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_2819_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_phy_statistical_stats_desc[i].format);
-
- for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pport_eth_ext_stats_desc[i].format);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc[i].format);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stats_desc64[i].format);
-
- for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN,
- pcie_perf_stall_stats_desc[i].format);
-
- for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_traffic_stats_desc[i].format, prio);
- }
-
- pfc_combined = mlx5e_query_pfc_combined(priv);
- for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- char pfc_string[ETH_GSTRING_LEN];
-
- snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, pfc_string);
- }
- }
-
- if (mlx5e_query_global_pause_combined(priv)) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- pport_per_prio_pfc_stats_desc[i].format, "global");
- }
- }
-
- /* port module event counters */
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
-
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
- strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
-
- /* IPSec counters */
- idx += mlx5e_ipsec_get_strings(priv, data + idx * ETH_GSTRING_LEN);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- return;
-
- /* per channel counters */
- for (i = 0; i < priv->channels.num; i++)
- for (j = 0; j < NUM_RQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- rq_stats_desc[j].format, i);
+ int i, idx = 0;
- for (tc = 0; tc < priv->channels.params.num_tc; tc++)
- for (i = 0; i < priv->channels.num; i++)
- for (j = 0; j < NUM_SQ_STATS; j++)
- sprintf(data + (idx++) * ETH_GSTRING_LEN,
- sq_stats_desc[j].format,
- priv->channel_tc2txq[i][tc]);
+ for (i = 0; i < mlx5e_num_stats_grps; i++)
+ idx = mlx5e_stats_grps[i].fill_strings(priv, data, idx);
}
void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data)
@@ -340,10 +201,7 @@ static void mlx5e_get_strings(struct net_device *dev, u32 stringset, u8 *data)
void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
struct ethtool_stats *stats, u64 *data)
{
- struct mlx5e_channels *channels;
- struct mlx5_priv *mlx5_priv;
- int i, j, tc, prio, idx = 0;
- unsigned long pfc_combined;
+ int i, idx = 0;
if (!data)
return;
@@ -351,102 +209,10 @@ void mlx5e_ethtool_get_ethtool_stats(struct mlx5e_priv *priv,
mutex_lock(&priv->state_lock);
if (test_bit(MLX5E_STATE_OPENED, &priv->state))
mlx5e_update_stats(priv, true);
- channels = &priv->channels;
mutex_unlock(&priv->state_lock);
- for (i = 0; i < NUM_SW_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw,
- sw_stats_desc, i);
-
- for (i = 0; i < MLX5E_NUM_Q_CNTRS(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt,
- q_stats_desc, i);
-
- for (i = 0; i < NUM_VPORT_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
- vport_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
- pport_802_3_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
- pport_2863_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
- pport_2819_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
- pport_phy_statistical_stats_desc, i);
-
- for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
- pport_eth_ext_stats_desc, i);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc, i);
-
- for (i = 0; i < NUM_PCIE_PERF_COUNTERS64(priv); i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stats_desc64, i);
-
- for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS(priv); i++)
- data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
- pcie_perf_stall_stats_desc, i);
-
- for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_traffic_stats_desc, i);
- }
-
- pfc_combined = mlx5e_query_pfc_combined(priv);
- for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
- pport_per_prio_pfc_stats_desc, i);
- }
- }
-
- if (mlx5e_query_global_pause_combined(priv)) {
- for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
- data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
- pport_per_prio_pfc_stats_desc, i);
- }
- }
-
- /* port module event counters */
- mlx5_priv = &priv->mdev->priv;
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_status_desc); i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
- mlx5e_pme_status_desc, i);
-
- for (i = 0; i < ARRAY_SIZE(mlx5e_pme_error_desc); i++)
- data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
- mlx5e_pme_error_desc, i);
-
- /* IPSec counters */
- idx += mlx5e_ipsec_get_stats(priv, data + idx);
-
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
- return;
-
- /* per channel counters */
- for (i = 0; i < channels->num; i++)
- for (j = 0; j < NUM_RQ_STATS; j++)
- data[idx++] =
- MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
- rq_stats_desc, j);
-
- for (tc = 0; tc < priv->channels.params.num_tc; tc++)
- for (i = 0; i < channels->num; i++)
- for (j = 0; j < NUM_SQ_STATS; j++)
- data[idx++] = MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
- sq_stats_desc, j);
+ for (i = 0; i < mlx5e_num_stats_grps; i++)
+ idx = mlx5e_stats_grps[i].fill_stats(priv, data, idx);
}
static void mlx5e_get_ethtool_stats(struct net_device *dev,
@@ -1417,14 +1183,15 @@ static int mlx5e_set_pauseparam(struct net_device *netdev,
int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv,
struct ethtool_ts_info *info)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
int ret;
ret = ethtool_op_get_ts_info(priv->netdev, info);
if (ret)
return ret;
- info->phc_index = priv->tstamp.ptp ?
- ptp_clock_index(priv->tstamp.ptp) : -1;
+ info->phc_index = mdev->clock.ptp ?
+ ptp_clock_index(mdev->clock.ptp) : -1;
if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
return 0;
@@ -1573,6 +1340,16 @@ static int mlx5e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return mlx5_set_port_wol(mdev, mlx5_wol_mode);
}
+static u32 mlx5e_get_msglevel(struct net_device *dev)
+{
+ return ((struct mlx5e_priv *)netdev_priv(dev))->msglevel;
+}
+
+static void mlx5e_set_msglevel(struct net_device *dev, u32 val)
+{
+ ((struct mlx5e_priv *)netdev_priv(dev))->msglevel = val;
+}
+
static int mlx5e_set_phys_id(struct net_device *dev,
enum ethtool_phys_id_state state)
{
@@ -1677,29 +1454,36 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev,
typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable);
-static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
+static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable,
+ bool is_rx_cq)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_channels new_channels = {};
- bool rx_mode_changed;
- u8 rx_cq_period_mode;
+ bool mode_changed;
+ u8 cq_period_mode, current_cq_period_mode;
int err = 0;
- rx_cq_period_mode = enable ?
+ cq_period_mode = enable ?
MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
- rx_mode_changed = rx_cq_period_mode != priv->channels.params.rx_cq_period_mode;
+ current_cq_period_mode = is_rx_cq ?
+ priv->channels.params.rx_cq_moderation.cq_period_mode :
+ priv->channels.params.tx_cq_moderation.cq_period_mode;
+ mode_changed = cq_period_mode != current_cq_period_mode;
- if (rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE &&
!MLX5_CAP_GEN(mdev, cq_period_start_from_cqe))
return -EOPNOTSUPP;
- if (!rx_mode_changed)
+ if (!mode_changed)
return 0;
new_channels.params = priv->channels.params;
- mlx5e_set_rx_cq_mode_params(&new_channels.params, rx_cq_period_mode);
+ if (is_rx_cq)
+ mlx5e_set_rx_cq_mode_params(&new_channels.params, cq_period_mode);
+ else
+ mlx5e_set_tx_cq_mode_params(&new_channels.params, cq_period_mode);
if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
priv->channels.params = new_channels.params;
@@ -1714,6 +1498,16 @@ static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
return 0;
}
+static int set_pflag_tx_cqe_based_moder(struct net_device *netdev, bool enable)
+{
+ return set_pflag_cqe_based_moder(netdev, enable, false);
+}
+
+static int set_pflag_rx_cqe_based_moder(struct net_device *netdev, bool enable)
+{
+ return set_pflag_cqe_based_moder(netdev, enable, true);
+}
+
int mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool new_val)
{
bool curr_val = MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_RX_CQE_COMPRESS);
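set_pflag_cqe_based_moder() above now serves both directions, with thin RX/TX wrappers choosing which CQ moderation parameters to compare and update. A small userspace sketch of that shape, using made-up types rather than mlx5e_params:

#include <stdbool.h>
#include <stdio.h>

enum { PERIOD_FROM_EQE, PERIOD_FROM_CQE };

struct cq_moder { int cq_period_mode; };
struct params { struct cq_moder rx, tx; };

/* returns 1 when the mode actually changed (channels would be reopened) */
static int set_cqe_based_moder(struct params *p, bool enable, bool is_rx)
{
	int mode = enable ? PERIOD_FROM_CQE : PERIOD_FROM_EQE;
	struct cq_moder *m = is_rx ? &p->rx : &p->tx;

	if (m->cq_period_mode == mode)
		return 0;
	m->cq_period_mode = mode;
	return 1;
}

static int set_rx(struct params *p, bool en) { return set_cqe_based_moder(p, en, true); }
static int set_tx(struct params *p, bool en) { return set_cqe_based_moder(p, en, false); }

int main(void)
{
	struct params p = { { PERIOD_FROM_EQE }, { PERIOD_FROM_EQE } };

	printf("rx changed: %d\n", set_rx(&p, true));
	printf("tx changed: %d\n", set_tx(&p, true));
	return 0;
}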
@@ -1754,7 +1548,7 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -EOPNOTSUPP;
- if (enable && priv->tstamp.hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
+ if (enable && priv->tstamp.rx_filter != HWTSTAMP_FILTER_NONE) {
netdev_err(netdev, "Can't enable cqe compression while timestamping is enabled.\n");
return -EINVAL;
}
@@ -1802,6 +1596,12 @@ static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags)
goto out;
err = mlx5e_handle_pflag(netdev, pflags,
+ MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ set_pflag_tx_cqe_based_moder);
+ if (err)
+ goto out;
+
+ err = mlx5e_handle_pflag(netdev, pflags,
MLX5E_PFLAG_RX_CQE_COMPRESS,
set_pflag_rx_cqe_compress);
@@ -1905,4 +1705,7 @@ const struct ethtool_ops mlx5e_ethtool_ops = {
.get_priv_flags = mlx5e_get_priv_flags,
.set_priv_flags = mlx5e_set_priv_flags,
.self_test = mlx5e_self_test,
+ .get_msglevel = mlx5e_get_msglevel,
+ .set_msglevel = mlx5e_set_msglevel,
+
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 4837045..def5134 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -118,7 +118,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
int i;
list_size = 0;
- for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID)
+ for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID)
list_size++;
max_list_size = 1 << MLX5_CAP_GEN(priv->mdev, log_max_vlan_list);
@@ -135,7 +135,7 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
return -ENOMEM;
i = 0;
- for_each_set_bit(vlan, priv->fs.vlan.active_vlans, VLAN_N_VID) {
+ for_each_set_bit(vlan, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
if (i >= list_size)
break;
vlans[i++] = vlan;
@@ -154,7 +154,8 @@ enum mlx5e_vlan_rule_type {
MLX5E_VLAN_RULE_TYPE_UNTAGGED,
MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
- MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID,
+ MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID,
};
static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
@@ -162,7 +163,7 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
u16 vid, struct mlx5_flow_spec *spec)
{
struct mlx5_flow_table *ft = priv->fs.vlan.ft.t;
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rule_p;
MLX5_DECLARE_FLOW_ACT(flow_act);
int err = 0;
@@ -174,6 +175,10 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ /* cvlan_tag enabled in the match criteria but
+ * disabled in the match value means that neither an
+ * S-tag nor a C-tag is present (untagged for both)
+ */
rule_p = &priv->fs.vlan.untagged_rule;
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
@@ -190,8 +195,18 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
outer_headers.svlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
break;
- default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
- rule_p = &priv->fs.vlan.active_vlans_rule[vid];
+ case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
+ rule_p = &priv->fs.vlan.active_svlans_rule[vid];
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
+ vid);
+ break;
+ default: /* MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID */
+ rule_p = &priv->fs.vlan.active_cvlans_rule[vid];
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.cvlan_tag);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
@@ -223,7 +238,7 @@ static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
if (!spec)
return -ENOMEM;
- if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_VID)
+ if (rule_type == MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID)
mlx5e_vport_context_update_vlans(priv);
err = __mlx5e_add_vlan_rule(priv, rule_type, vid, spec);
@@ -255,11 +270,17 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
priv->fs.vlan.any_svlan_rule = NULL;
}
break;
- case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+ case MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID:
+ if (priv->fs.vlan.active_svlans_rule[vid]) {
+ mlx5_del_flow_rules(priv->fs.vlan.active_svlans_rule[vid]);
+ priv->fs.vlan.active_svlans_rule[vid] = NULL;
+ }
+ break;
+ case MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID:
mlx5e_vport_context_update_vlans(priv);
- if (priv->fs.vlan.active_vlans_rule[vid]) {
- mlx5_del_flow_rules(priv->fs.vlan.active_vlans_rule[vid]);
- priv->fs.vlan.active_vlans_rule[vid] = NULL;
+ if (priv->fs.vlan.active_cvlans_rule[vid]) {
+ mlx5_del_flow_rules(priv->fs.vlan.active_cvlans_rule[vid]);
+ priv->fs.vlan.active_cvlans_rule[vid] = NULL;
}
mlx5e_vport_context_update_vlans(priv);
break;
@@ -283,46 +304,83 @@ static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
}
-void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (!priv->fs.vlan.filter_disabled)
+ if (!priv->fs.vlan.cvlan_filter_disabled)
return;
- priv->fs.vlan.filter_disabled = false;
+ priv->fs.vlan.cvlan_filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv)
{
- if (priv->fs.vlan.filter_disabled)
+ if (priv->fs.vlan.cvlan_filter_disabled)
return;
- priv->fs.vlan.filter_disabled = true;
+ priv->fs.vlan.cvlan_filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
}
-int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
- u16 vid)
+static int mlx5e_vlan_rx_add_cvid(struct mlx5e_priv *priv, u16 vid)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
+ int err;
+
+ set_bit(vid, priv->fs.vlan.active_cvlans);
- set_bit(vid, priv->fs.vlan.active_vlans);
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ if (err)
+ clear_bit(vid, priv->fs.vlan.active_cvlans);
+
+ return err;
+}
+
+static int mlx5e_vlan_rx_add_svid(struct mlx5e_priv *priv, u16 vid)
+{
+ struct net_device *netdev = priv->netdev;
+ int err;
+
+ set_bit(vid, priv->fs.vlan.active_svlans);
+
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ if (err) {
+ clear_bit(vid, priv->fs.vlan.active_svlans);
+ return err;
+ }
- return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+ /* Re-evaluate netdev features: C-tag stripping is dropped while S-tag VLANs are active */
+ netdev_update_features(netdev);
+ return err;
}
-int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
- u16 vid)
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- clear_bit(vid, priv->fs.vlan.active_vlans);
+ if (be16_to_cpu(proto) == ETH_P_8021Q)
+ return mlx5e_vlan_rx_add_cvid(priv, vid);
+ else if (be16_to_cpu(proto) == ETH_P_8021AD)
+ return mlx5e_vlan_rx_add_svid(priv, vid);
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+ return -EOPNOTSUPP;
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (be16_to_cpu(proto) == ETH_P_8021Q) {
+ clear_bit(vid, priv->fs.vlan.active_cvlans);
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, vid);
+ } else if (be16_to_cpu(proto) == ETH_P_8021AD) {
+ clear_bit(vid, priv->fs.vlan.active_svlans);
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, vid);
+ netdev_update_features(dev);
+ }
return 0;
}
@@ -333,11 +391,14 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+ for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- if (priv->fs.vlan.filter_disabled &&
+ for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+
+ if (priv->fs.vlan.cvlan_filter_disabled &&
!(priv->netdev->flags & IFF_PROMISC))
mlx5e_add_any_vid_rules(priv);
}
@@ -348,11 +409,14 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
- for_each_set_bit(i, priv->fs.vlan.active_vlans, VLAN_N_VID) {
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, i);
+ for_each_set_bit(i, priv->fs.vlan.active_cvlans, VLAN_N_VID) {
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_CTAG_VID, i);
}
- if (priv->fs.vlan.filter_disabled &&
+ for_each_set_bit(i, priv->fs.vlan.active_svlans, VLAN_N_VID)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_STAG_VID, i);
+
+ if (priv->fs.vlan.cvlan_filter_disabled &&
!(priv->netdev->flags & IFF_PROMISC))
mlx5e_del_any_vid_rules(priv);
}
@@ -548,8 +612,11 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
if (enable_promisc) {
+ if (!priv->channels.params.vlan_strip_disable)
+ netdev_warn_once(ndev,
+ "S-tagged traffic will be dropped while C-tag vlan stripping is enabled\n");
mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
- if (!priv->fs.vlan.filter_disabled)
+ if (!priv->fs.vlan.cvlan_filter_disabled)
mlx5e_add_any_vid_rules(priv);
}
if (enable_allmulti)
@@ -564,7 +631,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
if (disable_allmulti)
mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
if (disable_promisc) {
- if (!priv->fs.vlan.filter_disabled)
+ if (!priv->fs.vlan.cvlan_filter_disabled)
mlx5e_del_any_vid_rules(priv);
mlx5e_del_l2_flow_rule(priv, &ea->promisc);
}
@@ -741,7 +808,7 @@ mlx5e_generate_ttc_rule(struct mlx5e_priv *priv,
static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5e_ttc_table *ttc;
struct mlx5_flow_handle **rules;
struct mlx5_flow_table *ft;
@@ -912,7 +979,7 @@ mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv,
static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle **rules;
struct mlx5e_ttc_table *ttc;
struct mlx5_flow_table *ft;
@@ -1008,7 +1075,7 @@ err:
return err;
}
-static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
+int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv)
{
struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
struct mlx5_flow_table_attr ft_attr = {};
@@ -1044,7 +1111,7 @@ err:
return err;
}
-static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
+void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv)
{
struct mlx5e_ttc_table *ttc = &priv->fs.inner_ttc;
@@ -1109,7 +1176,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv,
struct mlx5e_l2_rule *ai, int type)
{
struct mlx5_flow_table *ft = priv->fs.l2.ft.t;
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
MLX5_DECLARE_FLOW_ACT(flow_act);
struct mlx5_flow_spec *spec;
int err = 0;
@@ -1268,13 +1335,15 @@ err_destroy_flow_table:
return err;
}
-#define MLX5E_NUM_VLAN_GROUPS 3
+#define MLX5E_NUM_VLAN_GROUPS 4
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
-#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
-#define MLX5E_VLAN_GROUP2_SIZE BIT(0)
+#define MLX5E_VLAN_GROUP1_SIZE BIT(12)
+#define MLX5E_VLAN_GROUP2_SIZE BIT(1)
+#define MLX5E_VLAN_GROUP3_SIZE BIT(0)
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
MLX5E_VLAN_GROUP1_SIZE +\
- MLX5E_VLAN_GROUP2_SIZE)
+ MLX5E_VLAN_GROUP2_SIZE +\
+ MLX5E_VLAN_GROUP3_SIZE)
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
int inlen)
@@ -1297,7 +1366,8 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1308,7 +1378,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP2_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1317,6 +1387,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
goto err_destroy_groups;
ft->num_groups++;
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_VLAN_GROUP3_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
return 0;
err_destroy_groups:
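The reworked VLAN steering table now carries four flow groups; a quick arithmetic check of the new MLX5E_VLAN_TABLE_SIZE (sketch only, mirroring the macros above):

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	unsigned int g0 = BIT(12);	/* C-tag + exact VID matches */
	unsigned int g1 = BIT(12);	/* S-tag + exact VID matches (new) */
	unsigned int g2 = BIT(1);	/* untagged + any-C-tag rules */
	unsigned int g3 = BIT(0);	/* any-S-tag rule (new) */

	printf("VLAN table size = %u\n", g0 + g1 + g2 + g3);	/* 8195 */
	return 0;
}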
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index cc11bbb..d2b057a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -196,6 +196,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->rx_bytes += rq_stats->bytes;
s->rx_lro_packets += rq_stats->lro_packets;
s->rx_lro_bytes += rq_stats->lro_bytes;
+ s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets;
s->rx_csum_none += rq_stats->csum_none;
s->rx_csum_complete += rq_stats->csum_complete;
s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
@@ -224,6 +225,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->tx_tso_bytes += sq_stats->tso_bytes;
s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
+ s->tx_added_vlan_packets += sq_stats->added_vlan_packets;
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
@@ -373,8 +375,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
enum mlx5_dev_event event, unsigned long param)
{
struct mlx5e_priv *priv = vpriv;
- struct ptp_clock_event ptp_event;
- struct mlx5_eqe *eqe = NULL;
if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
@@ -384,14 +384,6 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
case MLX5_DEV_EVENT_PORT_DOWN:
queue_work(priv->wq, &priv->update_carrier_work);
break;
- case MLX5_DEV_EVENT_PPS:
- eqe = (struct mlx5_eqe *)param;
- ptp_event.index = eqe->data.pps.pin;
- ptp_event.timestamp =
- timecounter_cyc2time(&priv->tstamp.clock,
- be64_to_cpu(eqe->data.pps.time_stamp));
- mlx5e_pps_event_handler(vpriv, &ptp_event);
- break;
default:
break;
}
@@ -585,6 +577,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
rq->pdev = c->pdev;
rq->netdev = c->netdev;
rq->tstamp = c->tstamp;
+ rq->clock = &mdev->clock;
rq->channel = c;
rq->ix = c->ix;
rq->mdev = mdev;
@@ -690,7 +683,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c,
}
INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
- rq->am.mode = params->rx_cq_period_mode;
+ rq->am.mode = params->rx_cq_moderation.cq_period_mode;
rq->page_cache.head = 0;
rq->page_cache.tail = 0;
@@ -1123,6 +1116,7 @@ static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
sq->pdev = c->pdev;
sq->tstamp = c->tstamp;
+ sq->clock = &mdev->clock;
sq->mkey_be = c->mkey_be;
sq->channel = c;
sq->txq_ix = txq_ix;
@@ -1982,7 +1976,7 @@ static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
}
mlx5e_build_common_cq_param(priv, param);
- param->cq_period_mode = params->rx_cq_period_mode;
+ param->cq_period_mode = params->rx_cq_moderation.cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
@@ -1994,8 +1988,7 @@ static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);
mlx5e_build_common_cq_param(priv, param);
-
- param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
+ param->cq_period_mode = params->tx_cq_moderation.cq_period_mode;
}
static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
@@ -2678,6 +2671,12 @@ void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
netif_carrier_on(netdev);
}
+void mlx5e_timestamp_set(struct mlx5e_priv *priv)
+{
+ priv->tstamp.tx_type = HWTSTAMP_TX_OFF;
+ priv->tstamp.rx_filter = HWTSTAMP_FILTER_NONE;
+}
+
int mlx5e_open_locked(struct net_device *netdev)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -2693,7 +2692,7 @@ int mlx5e_open_locked(struct net_device *netdev)
mlx5e_activate_priv_channels(priv);
if (priv->profile->update_carrier)
priv->profile->update_carrier(priv);
- mlx5e_timestamp_init(priv);
+ mlx5e_timestamp_set(priv);
if (priv->profile->update_stats)
queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
@@ -2731,7 +2730,6 @@ int mlx5e_close_locked(struct net_device *netdev)
clear_bit(MLX5E_STATE_OPENED, &priv->state);
- mlx5e_timestamp_cleanup(priv);
netif_carrier_off(priv->netdev);
mlx5e_deactivate_priv_channels(priv);
mlx5e_close_channels(&priv->channels);
@@ -3086,13 +3084,10 @@ out:
}
#ifdef CONFIG_MLX5_ESWITCH
-static int mlx5e_setup_tc_cls_flower(struct net_device *dev,
+static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
- cls_flower->common.chain_index)
+ if (cls_flower->common.chain_index)
return -EOPNOTSUPP;
switch (cls_flower->command) {
@@ -3106,17 +3101,54 @@ static int mlx5e_setup_tc_cls_flower(struct net_device *dev,
return -EOPNOTSUPP;
}
}
+
+int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct mlx5e_priv *priv = cb_priv;
+
+ if (!tc_can_offload(priv->netdev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_setup_tc_cls_flower(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, mlx5e_setup_tc_block_cb,
+ priv, priv);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, mlx5e_setup_tc_block_cb,
+ priv);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
#endif
-static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
- void *type_data)
+int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
+ void *type_data)
{
switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
- case TC_SETUP_CLSFLOWER:
- return mlx5e_setup_tc_cls_flower(dev, type_data);
+ case TC_SETUP_BLOCK:
+ return mlx5e_setup_tc_block(dev, type_data);
#endif
- case TC_SETUP_MQPRIO:
+ case TC_SETUP_QDISC_MQPRIO:
return mlx5e_setup_tc_mqprio(dev, type_data);
default:
return -EOPNOTSUPP;
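mlx5e_setup_tc_block() above is the new entry point: flower rules now arrive through a block callback registered on TC_BLOCK_BIND and dropped on TC_BLOCK_UNBIND (tcf_block_cb_register/unregister on the kernel side). A stripped-down userspace sketch of the same bind/unbind shape, with hypothetical types:

#include <stdio.h>
#include <stddef.h>

enum block_cmd { BLOCK_BIND, BLOCK_UNBIND };

typedef int (*setup_cb)(void *type_data, void *cb_priv);

struct block { setup_cb cb; void *cb_priv; };

static int flower_cb(void *type_data, void *cb_priv)
{
	(void)type_data;
	(void)cb_priv;
	return 0;			/* a real driver would offload the rule here */
}

static int setup_block(struct block *b, enum block_cmd cmd, void *priv)
{
	switch (cmd) {
	case BLOCK_BIND:
		b->cb = flower_cb;	/* register the per-block callback */
		b->cb_priv = priv;
		return 0;
	case BLOCK_UNBIND:
		b->cb = NULL;		/* unregister it */
		b->cb_priv = NULL;
		return 0;
	}
	return -1;
}

int main(void)
{
	struct block blk = { NULL, NULL };
	int priv_token = 0;

	setup_block(&blk, BLOCK_BIND, &priv_token);
	if (blk.cb)
		blk.cb(NULL, blk.cb_priv);	/* classifier delivering a rule */
	setup_block(&blk, BLOCK_UNBIND, NULL);
	return 0;
}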
@@ -3230,14 +3262,14 @@ out:
return err;
}
-static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
+static int set_feature_cvlan_filter(struct net_device *netdev, bool enable)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
if (enable)
- mlx5e_enable_vlan_filter(priv);
+ mlx5e_enable_cvlan_filter(priv);
else
- mlx5e_disable_vlan_filter(priv);
+ mlx5e_disable_cvlan_filter(priv);
return 0;
}
@@ -3348,7 +3380,7 @@ static int mlx5e_set_features(struct net_device *netdev,
set_feature_lro);
err |= mlx5e_handle_feature(netdev, features,
NETIF_F_HW_VLAN_CTAG_FILTER,
- set_feature_vlan_filter);
+ set_feature_cvlan_filter);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
set_feature_tc_num_filters);
err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
@@ -3365,6 +3397,25 @@ static int mlx5e_set_features(struct net_device *netdev,
return err ? -EINVAL : 0;
}
+static netdev_features_t mlx5e_fix_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mutex_lock(&priv->state_lock);
+ if (!bitmap_empty(priv->fs.vlan.active_svlans, VLAN_N_VID)) {
+ /* HW strips the outer C-tag header; this is a problem
+ * for S-tag traffic.
+ */
+ features &= ~NETIF_F_HW_VLAN_CTAG_RX;
+ if (!priv->channels.params.vlan_strip_disable)
+ netdev_warn(netdev, "Dropping C-tag vlan stripping offload due to S-tag vlan\n");
+ }
+ mutex_unlock(&priv->state_lock);
+
+ return features;
+}
+
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -3403,6 +3454,80 @@ out:
return err;
}
+int mlx5e_hwstamp_set(struct mlx5e_priv *priv, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+ return -EOPNOTSUPP;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ /* TX HW timestamp */
+ switch (config.tx_type) {
+ case HWTSTAMP_TX_OFF:
+ case HWTSTAMP_TX_ON:
+ break;
+ default:
+ return -ERANGE;
+ }
+
+ mutex_lock(&priv->state_lock);
+ /* RX HW timestamp */
+ switch (config.rx_filter) {
+ case HWTSTAMP_FILTER_NONE:
+ /* Reset CQE compression to Admin default */
+ mlx5e_modify_rx_cqe_compression_locked(priv, priv->channels.params.rx_cqe_compress_def);
+ break;
+ case HWTSTAMP_FILTER_ALL:
+ case HWTSTAMP_FILTER_SOME:
+ case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+ case HWTSTAMP_FILTER_PTP_V2_EVENT:
+ case HWTSTAMP_FILTER_PTP_V2_SYNC:
+ case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+ case HWTSTAMP_FILTER_NTP_ALL:
+ /* Disable CQE compression */
+ netdev_warn(priv->netdev, "Disabling cqe compression");
+ err = mlx5e_modify_rx_cqe_compression_locked(priv, false);
+ if (err) {
+ netdev_err(priv->netdev, "Failed disabling cqe compression err=%d\n", err);
+ mutex_unlock(&priv->state_lock);
+ return err;
+ }
+ config.rx_filter = HWTSTAMP_FILTER_ALL;
+ break;
+ default:
+ mutex_unlock(&priv->state_lock);
+ return -ERANGE;
+ }
+
+ memcpy(&priv->tstamp, &config, sizeof(config));
+ mutex_unlock(&priv->state_lock);
+
+ return copy_to_user(ifr->ifr_data, &config,
+ sizeof(config)) ? -EFAULT : 0;
+}
+
+int mlx5e_hwstamp_get(struct mlx5e_priv *priv, struct ifreq *ifr)
+{
+ struct hwtstamp_config *cfg = &priv->tstamp;
+
+ if (!MLX5_CAP_GEN(priv->mdev, device_frequency_khz))
+ return -EOPNOTSUPP;
+
+ return copy_to_user(ifr->ifr_data, cfg, sizeof(*cfg)) ? -EFAULT : 0;
+}
+
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct mlx5e_priv *priv = netdev_priv(dev);
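mlx5e_hwstamp_set() above implements the SIOCSHWTSTAMP handling directly on struct hwtstamp_config, now that the per-priv mlx5e_tstamp state is gone. A Linux userspace sketch of the same validation/coercion logic, using only the uapi header; the helper name is made up:

#include <stdio.h>
#include <string.h>
#include <linux/net_tstamp.h>

/* mimic the driver: only TX off/on is valid, and any accepted RX filter
 * other than "none" is coerced to HWTSTAMP_FILTER_ALL */
static int check_config(struct hwtstamp_config *cfg)
{
	if (cfg->tx_type != HWTSTAMP_TX_OFF && cfg->tx_type != HWTSTAMP_TX_ON)
		return -1;		/* the driver returns -ERANGE */
	if (cfg->rx_filter != HWTSTAMP_FILTER_NONE)
		cfg->rx_filter = HWTSTAMP_FILTER_ALL;
	return 0;
}

int main(void)
{
	struct hwtstamp_config cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.tx_type = HWTSTAMP_TX_ON;
	cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

	if (check_config(&cfg) == 0)
		printf("accepted, rx_filter coerced to %d\n", cfg.rx_filter);
	return 0;
}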
@@ -3726,7 +3851,7 @@ static u32 mlx5e_xdp_query(struct net_device *dev)
return prog_id;
}
-static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
+static int mlx5e_xdp(struct net_device *dev, struct netdev_bpf *xdp)
{
switch (xdp->command) {
case XDP_SETUP_PROG:
@@ -3768,6 +3893,7 @@ static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
.ndo_set_features = mlx5e_set_features,
+ .ndo_fix_features = mlx5e_fix_features,
.ndo_change_mtu = mlx5e_change_mtu,
.ndo_do_ioctl = mlx5e_ioctl,
.ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
@@ -3778,7 +3904,7 @@ static const struct net_device_ops mlx5e_netdev_ops = {
.ndo_rx_flow_steer = mlx5e_rx_flow_steer,
#endif
.ndo_tx_timeout = mlx5e_tx_timeout,
- .ndo_xdp = mlx5e_xdp,
+ .ndo_bpf = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = mlx5e_netpoll,
#endif
@@ -3882,14 +4008,32 @@ static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
(pci_bw <= 16000) && (pci_bw < link_speed));
}
+void mlx5e_set_tx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
+{
+ params->tx_cq_moderation.cq_period_mode = cq_period_mode;
+
+ params->tx_cq_moderation.pkts =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ params->tx_cq_moderation.usec =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+
+ if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
+ params->tx_cq_moderation.usec =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC_FROM_CQE;
+
+ MLX5E_SET_PFLAG(params, MLX5E_PFLAG_TX_CQE_BASED_MODER,
+ params->tx_cq_moderation.cq_period_mode ==
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+}
+
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
- params->rx_cq_period_mode = cq_period_mode;
+ params->rx_cq_moderation.cq_period_mode = cq_period_mode;
params->rx_cq_moderation.pkts =
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
params->rx_cq_moderation.usec =
- MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
params->rx_cq_moderation.usec =
@@ -3897,10 +4041,11 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
if (params->rx_am_enabled)
params->rx_cq_moderation =
- mlx5e_am_get_def_profile(params->rx_cq_period_mode);
+ mlx5e_am_get_def_profile(cq_period_mode);
MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
- params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
+ params->rx_cq_moderation.cq_period_mode ==
+ MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
@@ -3960,16 +4105,11 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
mlx5e_set_rx_cq_mode_params(params, cq_period_mode);
-
- params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
- params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ mlx5e_set_tx_cq_mode_params(params, cq_period_mode);
/* TX inline */
params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
- mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
- if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
- !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
- params->tx_min_inline_mode = MLX5_INLINE_MODE_L2;
+ params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev);
/* RSS */
params->rss_hfunc = ETH_RSS_HASH_XOR;
@@ -3989,6 +4129,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->netdev = netdev;
priv->profile = profile;
priv->ppriv = ppriv;
+ priv->msglevel = MLX5E_MSG_LEVEL;
priv->hard_mtu = MLX5E_ETH_HARD_MTU;
mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));
@@ -4055,6 +4196,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX;
if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
netdev->hw_features |= NETIF_F_GSO_PARTIAL;
@@ -4112,6 +4254,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
}
netdev->features |= NETIF_F_HIGHDMA;
+ netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
netdev->priv_flags |= IFF_UNICAST_FLT;
@@ -4269,7 +4412,9 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
if (netdev->reg_state != NETREG_REGISTERED)
return;
-
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_init_app(priv);
+#endif
/* Device already registered: sync netdev system state */
if (mlx5e_vxlan_allowed(mdev)) {
rtnl_lock();
@@ -4290,6 +4435,11 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
struct mlx5_core_dev *mdev = priv->mdev;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (priv->netdev->reg_state == NETREG_REGISTERED)
+ mlx5e_dcbnl_delete_app(priv);
+#endif
+
rtnl_lock();
if (netif_running(priv->netdev))
mlx5e_close(priv->netdev);
@@ -4510,6 +4660,9 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
goto err_detach;
}
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_init_app(priv);
+#endif
return priv;
err_detach:
@@ -4526,6 +4679,9 @@ static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
struct mlx5e_priv *priv = vpriv;
void *ppriv = priv->ppriv;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ mlx5e_dcbnl_delete_app(priv);
+#endif
unregister_netdev(priv->netdev);
mlx5e_detach(mdev, vpriv);
mlx5e_destroy_netdev(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 45e03c4..2c43606 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -34,6 +34,7 @@
#include <linux/mlx5/fs.h>
#include <net/switchdev.h>
#include <net/pkt_cls.h>
+#include <net/act_api.h>
#include <net/netevent.h>
#include <net/arp.h>
@@ -658,23 +659,12 @@ static int mlx5e_rep_get_phys_port_name(struct net_device *dev,
}
static int
-mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
+mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv,
struct tc_cls_flower_offload *cls_flower)
{
- struct mlx5e_priv *priv = netdev_priv(dev);
-
- if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
- cls_flower->common.chain_index)
+ if (cls_flower->common.chain_index)
return -EOPNOTSUPP;
- if (cls_flower->egress_dev) {
- struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
-
- dev = mlx5_eswitch_get_uplink_netdev(esw);
- return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_CLSFLOWER,
- cls_flower);
- }
-
switch (cls_flower->command) {
case TC_CLSFLOWER_REPLACE:
return mlx5e_configure_flower(priv, cls_flower);
@@ -687,12 +677,48 @@ mlx5e_rep_setup_tc_cls_flower(struct net_device *dev,
}
}
+static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ struct mlx5e_priv *priv = cb_priv;
+
+ if (!tc_can_offload(priv->netdev))
+ return -EOPNOTSUPP;
+
+ switch (type) {
+ case TC_SETUP_CLSFLOWER:
+ return mlx5e_rep_setup_tc_cls_flower(priv, type_data);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int mlx5e_rep_setup_tc_block(struct net_device *dev,
+ struct tc_block_offload *f)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
+ return -EOPNOTSUPP;
+
+ switch (f->command) {
+ case TC_BLOCK_BIND:
+ return tcf_block_cb_register(f->block, mlx5e_rep_setup_tc_cb,
+ priv, priv);
+ case TC_BLOCK_UNBIND:
+ tcf_block_cb_unregister(f->block, mlx5e_rep_setup_tc_cb, priv);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type,
void *type_data)
{
switch (type) {
- case TC_SETUP_CLSFLOWER:
- return mlx5e_rep_setup_tc_cls_flower(dev, type_data);
+ case TC_SETUP_BLOCK:
+ return mlx5e_rep_setup_tc_block(dev, type_data);
default:
return -EOPNOTSUPP;
}
@@ -986,6 +1012,7 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
{
struct mlx5e_rep_priv *rpriv;
struct net_device *netdev;
+ struct mlx5e_priv *upriv;
int err;
rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL);
@@ -1017,15 +1044,25 @@ mlx5e_vport_rep_load(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
goto err_detach_netdev;
}
+ upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ err = tc_setup_cb_egdev_register(netdev, mlx5e_setup_tc_block_cb,
+ upriv);
+ if (err)
+ goto err_neigh_cleanup;
+
err = register_netdev(netdev);
if (err) {
pr_warn("Failed to register representor netdev for vport %d\n",
rep->vport);
- goto err_neigh_cleanup;
+ goto err_egdev_cleanup;
}
return 0;
+err_egdev_cleanup:
+ tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
+ upriv);
+
err_neigh_cleanup:
mlx5e_rep_neigh_cleanup(rpriv);
@@ -1045,9 +1082,12 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep)
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5e_rep_priv *rpriv = priv->ppriv;
void *ppriv = priv->ppriv;
+ struct mlx5e_priv *upriv;
unregister_netdev(rep->netdev);
-
+ upriv = netdev_priv(mlx5_eswitch_get_uplink_netdev(esw));
+ tc_setup_cb_egdev_unregister(netdev, mlx5e_setup_tc_block_cb,
+ upriv);
mlx5e_rep_neigh_cleanup(rpriv);
mlx5e_detach_netdev(priv);
mlx5e_destroy_netdev(priv);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 91b1b09..5b499c7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -42,10 +42,11 @@
#include "en_rep.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
+#include "lib/clock.h"
-static inline bool mlx5e_rx_hw_stamp(struct mlx5e_tstamp *tstamp)
+static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
{
- return tstamp->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL;
+ return config->rx_filter == HWTSTAMP_FILTER_ALL;
}
static inline void mlx5e_read_cqe_slot(struct mlx5e_cq *cq, u32 cqcc,
@@ -560,7 +561,6 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
(l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
- skb->mac_len = ETH_HLEN;
proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
tot_len = cqe_bcnt - network_depth;
@@ -607,10 +607,11 @@ static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
}
-static inline bool is_first_ethertype_ip(struct sk_buff *skb)
+static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth)
{
__be16 ethertype = ((struct ethhdr *)skb->data)->h_proto;
+ ethertype = __vlan_get_protocol(skb, ethertype, network_depth);
return (ethertype == htons(ETH_P_IP) || ethertype == htons(ETH_P_IPV6));
}
@@ -620,6 +621,8 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
struct sk_buff *skb,
bool lro)
{
+ int network_depth = 0;
+
if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
goto csum_none;
@@ -629,9 +632,17 @@ static inline void mlx5e_handle_csum(struct net_device *netdev,
return;
}
- if (is_first_ethertype_ip(skb)) {
+ if (is_last_ethertype_ip(skb, &network_depth)) {
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
+ if (network_depth > ETH_HLEN)
+ /* The CQE checksum is calculated from the IP header and does
+ * not cover VLAN headers (if present), so add the missing
+ * VLAN bytes to the checksum manually.
+ */
+ skb->csum = csum_partial(skb->data + ETH_HLEN,
+ network_depth - ETH_HLEN,
+ skb->csum);
rq->stats.csum_complete++;
return;
}
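The extra csum_partial() above exists because the CQE checksum starts at the IP header, while CHECKSUM_COMPLETE must also cover a VLAN header sitting between the Ethernet and IP headers. A simplified userspace illustration of folding those four bytes into a ones'-complement sum (not the kernel's csum_partial(), and the values are made up):

#include <stdio.h>
#include <stdint.h>

static uint32_t csum_add_bytes(uint32_t sum, const uint8_t *buf, int len)
{
	int i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (uint32_t)buf[i] << 8 | buf[i + 1];
	if (i < len)
		sum += (uint32_t)buf[i] << 8;		/* odd trailing byte */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);	/* fold carries */
	return sum;
}

int main(void)
{
	uint8_t vlan_hdr[4] = { 0x81, 0x00, 0x00, 0x64 };	/* TPID 0x8100, VID 100 */
	uint32_t cqe_csum = 0x1234;	/* hypothetical checksum from the CQE */

	printf("adjusted csum: 0x%04x\n",
	       csum_add_bytes(cqe_csum, vlan_hdr, sizeof(vlan_hdr)));
	return 0;
}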
@@ -659,9 +670,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
struct sk_buff *skb)
{
struct net_device *netdev = rq->netdev;
- struct mlx5e_tstamp *tstamp = rq->tstamp;
int lro_num_seg;
+ skb->mac_len = ETH_HLEN;
lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
if (lro_num_seg > 1) {
mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
@@ -674,17 +685,20 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
rq->stats.lro_bytes += cqe_bcnt;
}
- if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
- mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
+ if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ skb_hwtstamps(skb)->hwtstamp =
+ mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
if (likely(netdev->features & NETIF_F_RXHASH))
mlx5e_skb_set_hash(cqe, skb);
- if (cqe_has_vlan(cqe))
+ if (cqe_has_vlan(cqe)) {
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
be16_to_cpu(cqe->vlan_info));
+ rq->stats.removed_vlan_packets++;
+ }
skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
@@ -795,6 +809,7 @@ static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
return false;
xdp.data = va + *rx_headroom;
+ xdp_set_data_meta_invalid(&xdp);
xdp.data_end = xdp.data + *len;
xdp.data_hard_start = va;
@@ -1160,12 +1175,25 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
u32 cqe_bcnt,
struct sk_buff *skb)
{
- struct net_device *netdev = rq->netdev;
- struct mlx5e_tstamp *tstamp = rq->tstamp;
+ struct net_device *netdev;
char *pseudo_header;
+ u32 qpn;
u8 *dgid;
u8 g;
+ qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
+ netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
+
+ /* No mapping present, cannot process SKB. This might happen if a child
+ * interface is going down while having unprocessed CQEs on parent RQ
+ */
+ if (unlikely(!netdev)) {
+ /* TODO: add drop counters support */
+ skb->dev = NULL;
+ pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
+ return;
+ }
+
g = (be32_to_cpu(cqe->flags_rqpn) >> 28) & 3;
dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
if ((!g) || dgid[0] != 0xff)
@@ -1186,8 +1214,9 @@ static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
skb->ip_summed = CHECKSUM_COMPLETE;
skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
- if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
- mlx5e_fill_hwstamp(tstamp, get_cqe_ts(cqe), skb_hwtstamps(skb));
+ if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
+ skb_hwtstamps(skb)->hwtstamp =
+ mlx5_timecounter_cyc2time(rq->clock, get_cqe_ts(cqe));
skb_record_rx_queue(skb, rq->ix);
@@ -1227,6 +1256,10 @@ void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
goto wq_free_wqe;
mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
+ if (unlikely(!skb->dev)) {
+ dev_kfree_skb_any(skb);
+ goto wq_free_wqe;
+ }
napi_gro_receive(rq->cq.napi, skb);
wq_free_wqe:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index acf32fe..e401d9d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -63,7 +63,11 @@ profile[MLX5_CQ_PERIOD_NUM_MODES][MLX5E_PARAMS_AM_NUM_PROFILES] = {
static inline struct mlx5e_cq_moder mlx5e_am_get_profile(u8 cq_period_mode, int ix)
{
- return profile[cq_period_mode][ix];
+ struct mlx5e_cq_moder cq_moder;
+
+ cq_moder = profile[cq_period_mode][ix];
+ cq_moder.cq_period_mode = cq_period_mode;
+ return cq_moder;
}
struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
@@ -75,7 +79,7 @@ struct mlx5e_cq_moder mlx5e_am_get_def_profile(u8 rx_cq_period_mode)
else /* MLX5_CQ_PERIOD_MODE_START_FROM_EQE */
default_profile_ix = MLX5E_RX_AM_DEF_PROFILE_EQE;
- return profile[rx_cq_period_mode][default_profile_ix];
+ return mlx5e_am_get_profile(rx_cq_period_mode, default_profile_ix);
}
/* Adaptive moderation logic */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
new file mode 100644
index 0000000..b74ddc7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c
@@ -0,0 +1,899 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+#include "en_accel/ipsec.h"
+
+static const struct counter_desc sw_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_added_vlan_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
+};
+
+#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
+
+static int mlx5e_grp_sw_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_SW_COUNTERS;
+}
+
+static int mlx5e_grp_sw_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, sw_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_sw_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_SW_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(&priv->stats.sw, sw_stats_desc, i);
+ return idx;
+}
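The new en_stats.c is built around counter_desc tables: each counter is a printable name plus a byte offset into some stats structure, so the fill helpers above can walk an array instead of open-coding every field. A compact userspace sketch of that pattern, with a hypothetical stats struct:

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_sw_stats { uint64_t rx_packets, tx_packets; };

struct counter_desc { const char *format; size_t offset; };

#define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

static const struct counter_desc demo_desc[] = {
	DECLARE_STAT(struct demo_sw_stats, rx_packets),
	DECLARE_STAT(struct demo_sw_stats, tx_packets),
};

/* equivalent in spirit to MLX5E_READ_CTR64_CPU() */
static uint64_t read_ctr64(const void *base, const struct counter_desc *d)
{
	return *(const uint64_t *)((const char *)base + d->offset);
}

int main(void)
{
	struct demo_sw_stats s = { 10, 7 };
	size_t i;

	for (i = 0; i < sizeof(demo_desc) / sizeof(demo_desc[0]); i++)
		printf("%s = %llu\n", demo_desc[i].format,
		       (unsigned long long)read_ctr64(&s, &demo_desc[i]));
	return 0;
}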
+
+static const struct counter_desc q_stats_desc[] = {
+ { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
+};
+
+#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
+
+static int mlx5e_grp_q_get_num_stats(struct mlx5e_priv *priv)
+{
+ return priv->q_counter ? NUM_Q_COUNTERS : 0;
+}
+
+static int mlx5e_grp_q_fill_strings(struct mlx5e_priv *priv, u8 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, q_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_q_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_Q_COUNTERS && priv->q_counter; i++)
+ data[idx++] = MLX5E_READ_CTR32_CPU(&priv->stats.qcnt, q_stats_desc, i);
+ return idx;
+}
+
+#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
+static const struct counter_desc vport_stats_desc[] = {
+ { "rx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(received_eth_unicast.packets) },
+ { "rx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_unicast.octets) },
+ { "tx_vport_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
+ { "tx_vport_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
+ { "rx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(received_eth_multicast.packets) },
+ { "rx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(received_eth_multicast.octets) },
+ { "tx_vport_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
+ { "tx_vport_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
+ { "rx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
+ { "rx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
+ { "tx_vport_broadcast_packets",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
+ { "tx_vport_broadcast_bytes",
+ VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
+ { "rx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(received_ib_unicast.packets) },
+ { "rx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_unicast.octets) },
+ { "tx_vport_rdma_unicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
+ { "tx_vport_rdma_unicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
+ { "rx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(received_ib_multicast.packets) },
+ { "rx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(received_ib_multicast.octets) },
+ { "tx_vport_rdma_multicast_packets",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
+ { "tx_vport_rdma_multicast_bytes",
+ VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
+};
+
+#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
+
+static int mlx5e_grp_vport_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_VPORT_COUNTERS;
+}
+
+static int mlx5e_grp_vport_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, vport_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_vport_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(priv->stats.vport.query_vport_out,
+ vport_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_802_3_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_802_3_stats_desc[] = {
+ { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
+ { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
+ { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
+ { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
+ { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
+ { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
+ { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
+ { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
+ { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
+ { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
+ { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
+ { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
+ { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
+ { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
+ { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
+ { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
+ { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
+ { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
+};
+
+#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
+
+static int mlx5e_grp_802_3_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_802_3_COUNTERS;
+}
+
+static int mlx5e_grp_802_3_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_802_3_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_802_3_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.IEEE_802_3_counters,
+ pport_802_3_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_2863_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_2863_stats_desc[] = {
+ { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
+ { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
+ { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
+};
+
+#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
+
+static int mlx5e_grp_2863_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_2863_COUNTERS;
+}
+
+static int mlx5e_grp_2863_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2863_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_2863_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2863_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2863_counters,
+ pport_2863_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_2819_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_2819_stats_desc[] = {
+ { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
+ { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
+ { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
+ { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
+ { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
+ { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
+ { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
+ { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
+ { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
+ { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
+ { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
+ { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
+ { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
+};
+
+#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
+
+static int mlx5e_grp_2819_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_2819_COUNTERS;
+}
+
+static int mlx5e_grp_2819_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, pport_2819_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_2819_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PPORT_2819_COUNTERS; i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
+ pport_2819_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_PHY_STATISTICAL_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.phys_layer_statistical_cntrs.c##_high)
+static const struct counter_desc pport_phy_statistical_stats_desc[] = {
+ { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
+ { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
+};
+
+#define NUM_PPORT_PHY_COUNTERS ARRAY_SIZE(pport_phy_statistical_stats_desc)
+
+static int mlx5e_grp_phy_get_num_stats(struct mlx5e_priv *priv)
+{
+ return MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group) ?
+ NUM_PPORT_PHY_COUNTERS : 0;
+}
+
+static int mlx5e_grp_phy_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_phy_statistical_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_phy_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+ for (i = 0; i < NUM_PPORT_PHY_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_ETH_EXT_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pport_eth_ext_stats_desc[] = {
+ { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
+};
+
+#define NUM_PPORT_ETH_EXT_COUNTERS ARRAY_SIZE(pport_eth_ext_stats_desc)
+
+static int mlx5e_grp_eth_ext_get_num_stats(struct mlx5e_priv *priv)
+{
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
+ return NUM_PPORT_ETH_EXT_COUNTERS;
+
+ return 0;
+}
+
+static int mlx5e_grp_eth_ext_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
+ for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_eth_ext_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_eth_ext_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
+ for (i = 0; i < NUM_PPORT_ETH_EXT_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.eth_ext_counters,
+ pport_eth_ext_stats_desc, i);
+ return idx;
+}
+
+#define PCIE_PERF_OFF(c) \
+ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
+static const struct counter_desc pcie_perf_stats_desc[] = {
+ { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
+ { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
+};
+
+#define PCIE_PERF_OFF64(c) \
+ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
+static const struct counter_desc pcie_perf_stats_desc64[] = {
+ { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
+};
+
+static const struct counter_desc pcie_perf_stall_stats_desc[] = {
+ { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
+ { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
+ { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
+ { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
+};
+
+#define NUM_PCIE_PERF_COUNTERS ARRAY_SIZE(pcie_perf_stats_desc)
+#define NUM_PCIE_PERF_COUNTERS64 ARRAY_SIZE(pcie_perf_stats_desc64)
+#define NUM_PCIE_PERF_STALL_COUNTERS ARRAY_SIZE(pcie_perf_stall_stats_desc)
+
+static int mlx5e_grp_pcie_get_num_stats(struct mlx5e_priv *priv)
+{
+ int num_stats = 0;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+ num_stats += NUM_PCIE_PERF_COUNTERS;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
+ num_stats += NUM_PCIE_PERF_COUNTERS64;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
+ num_stats += NUM_PCIE_PERF_STALL_COUNTERS;
+
+ return num_stats;
+}
+
+static int mlx5e_grp_pcie_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stats_desc[i].format);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stats_desc64[i].format);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
+ for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stall_stats_desc[i].format);
+ return idx;
+}
+
+static int mlx5e_grp_pcie_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ int i;
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc, i);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS64; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc64, i);
+
+ if (MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
+ for (i = 0; i < NUM_PCIE_PERF_STALL_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stall_stats_desc, i);
+ return idx;
+}
+
+#define PPORT_PER_PRIO_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.eth_per_prio_grp_data_layout.c##_high)
+static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
+ { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
+ { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
+ { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
+ { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
+};
+
+#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
+
+static int mlx5e_grp_per_prio_traffic_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * NUM_PPORT_PRIO;
+}
+
+static int mlx5e_grp_per_prio_traffic_fill_strings(struct mlx5e_priv *priv,
+ u8 *data,
+ int idx)
+{
+ int i, prio;
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_traffic_stats_desc[i].format, prio);
+ }
+
+ return idx;
+}
+
+static int mlx5e_grp_per_prio_traffic_fill_stats(struct mlx5e_priv *priv,
+ u64 *data,
+ int idx)
+{
+ int i, prio;
+
+ for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_traffic_stats_desc, i);
+ }
+
+ return idx;
+}
+
+static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
+ /* %s is "global" or "prio{i}" */
+ { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
+ { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
+ { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
+ { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
+ { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
+};
+
+#define NUM_PPORT_PER_PRIO_PFC_COUNTERS ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
+
+static unsigned long mlx5e_query_pfc_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 pfc_en_tx;
+ u8 pfc_en_rx;
+ int err;
+
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return 0;
+
+ err = mlx5_query_port_pfc(mdev, &pfc_en_tx, &pfc_en_rx);
+
+ return err ? 0 : pfc_en_tx | pfc_en_rx;
+}
+
+static bool mlx5e_query_global_pause_combined(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 rx_pause;
+ u32 tx_pause;
+ int err;
+
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return false;
+
+ err = mlx5_query_port_pause(mdev, &rx_pause, &tx_pause);
+
+ return err ? false : rx_pause | tx_pause;
+}
+
+static int mlx5e_grp_per_prio_pfc_get_num_stats(struct mlx5e_priv *priv)
+{
+ return (mlx5e_query_global_pause_combined(priv) +
+ hweight8(mlx5e_query_pfc_combined(priv))) *
+ NUM_PPORT_PER_PRIO_PFC_COUNTERS;
+}
+
+static int mlx5e_grp_per_prio_pfc_fill_strings(struct mlx5e_priv *priv,
+ u8 *data,
+ int idx)
+{
+ unsigned long pfc_combined;
+ int i, prio;
+
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ char pfc_string[ETH_GSTRING_LEN];
+
+ snprintf(pfc_string, sizeof(pfc_string), "prio%d", prio);
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, pfc_string);
+ }
+ }
+
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ pport_per_prio_pfc_stats_desc[i].format, "global");
+ }
+ }
+
+ return idx;
+}
+
+static int mlx5e_grp_per_prio_pfc_fill_stats(struct mlx5e_priv *priv,
+ u64 *data,
+ int idx)
+{
+ unsigned long pfc_combined;
+ int i, prio;
+
+ pfc_combined = mlx5e_query_pfc_combined(priv);
+ for_each_set_bit(prio, &pfc_combined, NUM_PPORT_PRIO) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
+ pport_per_prio_pfc_stats_desc, i);
+ }
+ }
+
+ if (mlx5e_query_global_pause_combined(priv)) {
+ for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
+ data[idx++] =
+ MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
+ pport_per_prio_pfc_stats_desc, i);
+ }
+ }
+
+ return idx;
+}
+
+static const struct counter_desc mlx5e_pme_status_desc[] = {
+ { "module_unplug", 8 },
+};
+
+static const struct counter_desc mlx5e_pme_error_desc[] = {
+ { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
+ { "module_high_temp", 48 }, /* high temperature */
+ { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
+};
+
+#define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc)
+#define NUM_PME_ERR_STATS ARRAY_SIZE(mlx5e_pme_error_desc)
+
+static int mlx5e_grp_pme_get_num_stats(struct mlx5e_priv *priv)
+{
+ return NUM_PME_STATUS_STATS + NUM_PME_ERR_STATS;
+}
+
+static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i;
+
+ for (i = 0; i < NUM_PME_STATUS_STATS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_status_desc[i].format);
+
+ for (i = 0; i < NUM_PME_ERR_STATS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN, mlx5e_pme_error_desc[i].format);
+
+ return idx;
+}
+
+static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ struct mlx5_priv *mlx5_priv = &priv->mdev->priv;
+ int i;
+
+ for (i = 0; i < NUM_PME_STATUS_STATS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters,
+ mlx5e_pme_status_desc, i);
+
+ for (i = 0; i < NUM_PME_ERR_STATS; i++)
+ data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters,
+ mlx5e_pme_error_desc, i);
+
+ return idx;
+}
+
+static int mlx5e_grp_ipsec_get_num_stats(struct mlx5e_priv *priv)
+{
+ return mlx5e_ipsec_get_count(priv);
+}
+
+static int mlx5e_grp_ipsec_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ return idx + mlx5e_ipsec_get_strings(priv,
+ data + idx * ETH_GSTRING_LEN);
+}
+
+static int mlx5e_grp_ipsec_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ return idx + mlx5e_ipsec_get_stats(priv, data + idx);
+}
+
+static const struct counter_desc rq_stats_desc[] = {
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
+ { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
+};
+
+static const struct counter_desc sq_stats_desc[] = {
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, added_vlan_packets) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
+};
+
+#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
+#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
+
+static int mlx5e_grp_channels_get_num_stats(struct mlx5e_priv *priv)
+{
+ return (NUM_RQ_STATS * priv->channels.num) +
+ (NUM_SQ_STATS * priv->channels.num * priv->channels.params.num_tc);
+}
+
+static int mlx5e_grp_channels_fill_strings(struct mlx5e_priv *priv, u8 *data,
+ int idx)
+{
+ int i, j, tc;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return idx;
+
+ for (i = 0; i < priv->channels.num; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN, rq_stats_desc[j].format, i);
+
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+ for (i = 0; i < priv->channels.num; i++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ sq_stats_desc[j].format,
+ priv->channel_tc2txq[i][tc]);
+
+ return idx;
+}
+
+static int mlx5e_grp_channels_fill_stats(struct mlx5e_priv *priv, u64 *data,
+ int idx)
+{
+ struct mlx5e_channels *channels = &priv->channels;
+ int i, j, tc;
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return idx;
+
+ for (i = 0; i < channels->num; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->rq.stats,
+ rq_stats_desc, j);
+
+ for (tc = 0; tc < priv->channels.params.num_tc; tc++)
+ for (i = 0; i < channels->num; i++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ data[idx++] =
+ MLX5E_READ_CTR64_CPU(&channels->c[i]->sq[tc].stats,
+ sq_stats_desc, j);
+
+ return idx;
+}
+
+const struct mlx5e_stats_grp mlx5e_stats_grps[] = {
+ {
+ .get_num_stats = mlx5e_grp_sw_get_num_stats,
+ .fill_strings = mlx5e_grp_sw_fill_strings,
+ .fill_stats = mlx5e_grp_sw_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_q_get_num_stats,
+ .fill_strings = mlx5e_grp_q_fill_strings,
+ .fill_stats = mlx5e_grp_q_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_vport_get_num_stats,
+ .fill_strings = mlx5e_grp_vport_fill_strings,
+ .fill_stats = mlx5e_grp_vport_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_802_3_get_num_stats,
+ .fill_strings = mlx5e_grp_802_3_fill_strings,
+ .fill_stats = mlx5e_grp_802_3_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_2863_get_num_stats,
+ .fill_strings = mlx5e_grp_2863_fill_strings,
+ .fill_stats = mlx5e_grp_2863_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_2819_get_num_stats,
+ .fill_strings = mlx5e_grp_2819_fill_strings,
+ .fill_stats = mlx5e_grp_2819_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_phy_get_num_stats,
+ .fill_strings = mlx5e_grp_phy_fill_strings,
+ .fill_stats = mlx5e_grp_phy_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_eth_ext_get_num_stats,
+ .fill_strings = mlx5e_grp_eth_ext_fill_strings,
+ .fill_stats = mlx5e_grp_eth_ext_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_pcie_get_num_stats,
+ .fill_strings = mlx5e_grp_pcie_fill_strings,
+ .fill_stats = mlx5e_grp_pcie_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_per_prio_traffic_get_num_stats,
+ .fill_strings = mlx5e_grp_per_prio_traffic_fill_strings,
+ .fill_stats = mlx5e_grp_per_prio_traffic_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_per_prio_pfc_get_num_stats,
+ .fill_strings = mlx5e_grp_per_prio_pfc_fill_strings,
+ .fill_stats = mlx5e_grp_per_prio_pfc_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_pme_get_num_stats,
+ .fill_strings = mlx5e_grp_pme_fill_strings,
+ .fill_stats = mlx5e_grp_pme_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_ipsec_get_num_stats,
+ .fill_strings = mlx5e_grp_ipsec_fill_strings,
+ .fill_stats = mlx5e_grp_ipsec_fill_stats,
+ },
+ {
+ .get_num_stats = mlx5e_grp_channels_get_num_stats,
+ .fill_strings = mlx5e_grp_channels_fill_strings,
+ .fill_stats = mlx5e_grp_channels_fill_stats,
+ }
+};
+
+const int mlx5e_num_stats_grps = ARRAY_SIZE(mlx5e_stats_grps);
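[Illustrative note, not part of the patch] The additions above move every counter group behind a uniform table of callbacks, so the ethtool paths only iterate mlx5e_stats_grps. A minimal userspace C sketch of that dispatch pattern follows; all demo_* names are invented for illustration and are not driver symbols.

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN 32
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct demo_priv { int num_channels; };

struct demo_stats_grp {
	int (*get_num_stats)(struct demo_priv *priv);
	int (*fill_strings)(struct demo_priv *priv, char *data, int idx);
	int (*fill_stats)(struct demo_priv *priv, unsigned long long *data, int idx);
};

static int sw_num(struct demo_priv *priv) { (void)priv; return 2; }

static int sw_strings(struct demo_priv *priv, char *data, int idx)
{
	(void)priv;
	strcpy(data + (idx++) * ETH_GSTRING_LEN, "rx_packets");
	strcpy(data + (idx++) * ETH_GSTRING_LEN, "tx_packets");
	return idx;
}

static int sw_stats(struct demo_priv *priv, unsigned long long *data, int idx)
{
	(void)priv;
	data[idx++] = 100;	/* placeholder counter values */
	data[idx++] = 200;
	return idx;
}

static const struct demo_stats_grp demo_grps[] = {
	{ sw_num, sw_strings, sw_stats },
};

int main(void)
{
	struct demo_priv priv = { .num_channels = 1 };
	char strings[8 * ETH_GSTRING_LEN];
	unsigned long long vals[8];
	int i, idx, total = 0;

	/* get_sset_count-style pass: sum the per-group counts */
	for (i = 0; i < (int)ARRAY_SIZE(demo_grps); i++)
		total += demo_grps[i].get_num_stats(&priv);

	/* get_strings / get_ethtool_stats-style passes: each group advances idx */
	for (i = 0, idx = 0; i < (int)ARRAY_SIZE(demo_grps); i++)
		idx = demo_grps[i].fill_strings(&priv, strings, idx);
	for (i = 0, idx = 0; i < (int)ARRAY_SIZE(demo_grps); i++)
		idx = demo_grps[i].fill_stats(&priv, vals, idx);

	for (i = 0; i < total; i++)
		printf("%-16s %llu\n", strings + i * ETH_GSTRING_LEN, vals[i]);
	return 0;
}

Adding a new counter group then only means appending another entry to the table, which is the design choice the patch makes for mlx5e_stats_grps.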
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index f863721..d679e21 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -59,8 +59,10 @@ struct mlx5e_sw_stats {
u64 tx_tso_bytes;
u64 tx_tso_inner_packets;
u64 tx_tso_inner_bytes;
+ u64 tx_added_vlan_packets;
u64 rx_lro_packets;
u64 rx_lro_bytes;
+ u64 rx_removed_vlan_packets;
u64 rx_csum_unnecessary;
u64 rx_csum_none;
u64 rx_csum_complete;
@@ -91,54 +93,10 @@ struct mlx5e_sw_stats {
u64 link_down_events_phy;
};
-static const struct counter_desc sw_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_tso_inner_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_packets) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_lro_bytes) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_complete) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_drop) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_xdp_tx_full) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_none) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_csum_partial_inner) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_buff_alloc_err) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_blks) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cqe_compress_pkts) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_page_reuse) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_reuse) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_full) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_empty) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_busy) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_cache_waive) },
- { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, link_down_events_phy) },
-};
-
struct mlx5e_qcounter_stats {
u32 rx_out_of_buffer;
};
-static const struct counter_desc q_stats_desc[] = {
- { MLX5E_DECLARE_STAT(struct mlx5e_qcounter_stats, rx_out_of_buffer) },
-};
-
-#define VPORT_COUNTER_OFF(c) MLX5_BYTE_OFF(query_vport_counter_out, c)
#define VPORT_COUNTER_GET(vstats, c) MLX5_GET64(query_vport_counter_out, \
vstats->query_vport_out, c)
@@ -146,83 +104,22 @@ struct mlx5e_vport_stats {
__be64 query_vport_out[MLX5_ST_SZ_QW(query_vport_counter_out)];
};
-static const struct counter_desc vport_stats_desc[] = {
- { "rx_vport_unicast_packets",
- VPORT_COUNTER_OFF(received_eth_unicast.packets) },
- { "rx_vport_unicast_bytes",
- VPORT_COUNTER_OFF(received_eth_unicast.octets) },
- { "tx_vport_unicast_packets",
- VPORT_COUNTER_OFF(transmitted_eth_unicast.packets) },
- { "tx_vport_unicast_bytes",
- VPORT_COUNTER_OFF(transmitted_eth_unicast.octets) },
- { "rx_vport_multicast_packets",
- VPORT_COUNTER_OFF(received_eth_multicast.packets) },
- { "rx_vport_multicast_bytes",
- VPORT_COUNTER_OFF(received_eth_multicast.octets) },
- { "tx_vport_multicast_packets",
- VPORT_COUNTER_OFF(transmitted_eth_multicast.packets) },
- { "tx_vport_multicast_bytes",
- VPORT_COUNTER_OFF(transmitted_eth_multicast.octets) },
- { "rx_vport_broadcast_packets",
- VPORT_COUNTER_OFF(received_eth_broadcast.packets) },
- { "rx_vport_broadcast_bytes",
- VPORT_COUNTER_OFF(received_eth_broadcast.octets) },
- { "tx_vport_broadcast_packets",
- VPORT_COUNTER_OFF(transmitted_eth_broadcast.packets) },
- { "tx_vport_broadcast_bytes",
- VPORT_COUNTER_OFF(transmitted_eth_broadcast.octets) },
- { "rx_vport_rdma_unicast_packets",
- VPORT_COUNTER_OFF(received_ib_unicast.packets) },
- { "rx_vport_rdma_unicast_bytes",
- VPORT_COUNTER_OFF(received_ib_unicast.octets) },
- { "tx_vport_rdma_unicast_packets",
- VPORT_COUNTER_OFF(transmitted_ib_unicast.packets) },
- { "tx_vport_rdma_unicast_bytes",
- VPORT_COUNTER_OFF(transmitted_ib_unicast.octets) },
- { "rx_vport_rdma_multicast_packets",
- VPORT_COUNTER_OFF(received_ib_multicast.packets) },
- { "rx_vport_rdma_multicast_bytes",
- VPORT_COUNTER_OFF(received_ib_multicast.octets) },
- { "tx_vport_rdma_multicast_packets",
- VPORT_COUNTER_OFF(transmitted_ib_multicast.packets) },
- { "tx_vport_rdma_multicast_bytes",
- VPORT_COUNTER_OFF(transmitted_ib_multicast.octets) },
-};
-
-#define PPORT_802_3_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
#define PPORT_802_3_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->IEEE_802_3_counters, \
counter_set.eth_802_3_cntrs_grp_data_layout.c##_high)
-#define PPORT_2863_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
#define PPORT_2863_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->RFC_2863_counters, \
counter_set.eth_2863_cntrs_grp_data_layout.c##_high)
-#define PPORT_2819_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
#define PPORT_2819_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
-#define PPORT_PHY_STATISTICAL_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
counter_set.phys_layer_statistical_cntrs.c##_high)
-#define PPORT_PER_PRIO_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_per_prio_grp_data_layout.c##_high)
#define PPORT_PER_PRIO_GET(pstats, prio, c) \
MLX5_GET64(ppcnt_reg, pstats->per_prio_counters[prio], \
counter_set.eth_per_prio_grp_data_layout.c##_high)
#define NUM_PPORT_PRIO 8
-#define PPORT_ETH_EXT_OFF(c) \
- MLX5_BYTE_OFF(ppcnt_reg, \
- counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
#define PPORT_ETH_EXT_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, (pstats)->eth_ext_counters, \
counter_set.eth_extended_cntrs_grp_data_layout.c##_high)
@@ -237,82 +134,10 @@ struct mlx5e_pport_stats {
__be64 eth_ext_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
};
-static const struct counter_desc pport_802_3_stats_desc[] = {
- { "tx_packets_phy", PPORT_802_3_OFF(a_frames_transmitted_ok) },
- { "rx_packets_phy", PPORT_802_3_OFF(a_frames_received_ok) },
- { "rx_crc_errors_phy", PPORT_802_3_OFF(a_frame_check_sequence_errors) },
- { "tx_bytes_phy", PPORT_802_3_OFF(a_octets_transmitted_ok) },
- { "rx_bytes_phy", PPORT_802_3_OFF(a_octets_received_ok) },
- { "tx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_xmitted_ok) },
- { "tx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_xmitted_ok) },
- { "rx_multicast_phy", PPORT_802_3_OFF(a_multicast_frames_received_ok) },
- { "rx_broadcast_phy", PPORT_802_3_OFF(a_broadcast_frames_received_ok) },
- { "rx_in_range_len_errors_phy", PPORT_802_3_OFF(a_in_range_length_errors) },
- { "rx_out_of_range_len_phy", PPORT_802_3_OFF(a_out_of_range_length_field) },
- { "rx_oversize_pkts_phy", PPORT_802_3_OFF(a_frame_too_long_errors) },
- { "rx_symbol_err_phy", PPORT_802_3_OFF(a_symbol_error_during_carrier) },
- { "tx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_transmitted) },
- { "rx_mac_control_phy", PPORT_802_3_OFF(a_mac_control_frames_received) },
- { "rx_unsupported_op_phy", PPORT_802_3_OFF(a_unsupported_opcodes_received) },
- { "rx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_received) },
- { "tx_pause_ctrl_phy", PPORT_802_3_OFF(a_pause_mac_ctrl_frames_transmitted) },
-};
-
-static const struct counter_desc pport_2863_stats_desc[] = {
- { "rx_discards_phy", PPORT_2863_OFF(if_in_discards) },
- { "tx_discards_phy", PPORT_2863_OFF(if_out_discards) },
- { "tx_errors_phy", PPORT_2863_OFF(if_out_errors) },
-};
-
-static const struct counter_desc pport_2819_stats_desc[] = {
- { "rx_undersize_pkts_phy", PPORT_2819_OFF(ether_stats_undersize_pkts) },
- { "rx_fragments_phy", PPORT_2819_OFF(ether_stats_fragments) },
- { "rx_jabbers_phy", PPORT_2819_OFF(ether_stats_jabbers) },
- { "rx_64_bytes_phy", PPORT_2819_OFF(ether_stats_pkts64octets) },
- { "rx_65_to_127_bytes_phy", PPORT_2819_OFF(ether_stats_pkts65to127octets) },
- { "rx_128_to_255_bytes_phy", PPORT_2819_OFF(ether_stats_pkts128to255octets) },
- { "rx_256_to_511_bytes_phy", PPORT_2819_OFF(ether_stats_pkts256to511octets) },
- { "rx_512_to_1023_bytes_phy", PPORT_2819_OFF(ether_stats_pkts512to1023octets) },
- { "rx_1024_to_1518_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1024to1518octets) },
- { "rx_1519_to_2047_bytes_phy", PPORT_2819_OFF(ether_stats_pkts1519to2047octets) },
- { "rx_2048_to_4095_bytes_phy", PPORT_2819_OFF(ether_stats_pkts2048to4095octets) },
- { "rx_4096_to_8191_bytes_phy", PPORT_2819_OFF(ether_stats_pkts4096to8191octets) },
- { "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
-};
-
-static const struct counter_desc pport_phy_statistical_stats_desc[] = {
- { "rx_pcs_symbol_err_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
- { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
-};
-
-static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
- { "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
- { "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
- { "tx_prio%d_bytes", PPORT_PER_PRIO_OFF(tx_octets) },
- { "tx_prio%d_packets", PPORT_PER_PRIO_OFF(tx_frames) },
-};
-
-static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
- /* %s is "global" or "prio{i}" */
- { "rx_%s_pause", PPORT_PER_PRIO_OFF(rx_pause) },
- { "rx_%s_pause_duration", PPORT_PER_PRIO_OFF(rx_pause_duration) },
- { "tx_%s_pause", PPORT_PER_PRIO_OFF(tx_pause) },
- { "tx_%s_pause_duration", PPORT_PER_PRIO_OFF(tx_pause_duration) },
- { "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
-};
-
-static const struct counter_desc pport_eth_ext_stats_desc[] = {
- { "rx_buffer_passed_thres_phy", PPORT_ETH_EXT_OFF(rx_buffer_almost_full) },
-};
-
-#define PCIE_PERF_OFF(c) \
- MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
#define PCIE_PERF_GET(pcie_stats, c) \
MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
counter_set.pcie_perf_cntrs_grp_data_layout.c)
-#define PCIE_PERF_OFF64(c) \
- MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
#define PCIE_PERF_GET64(pcie_stats, c) \
MLX5_GET64(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
counter_set.pcie_perf_cntrs_grp_data_layout.c##_high)
@@ -321,22 +146,6 @@ struct mlx5e_pcie_stats {
__be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
};
-static const struct counter_desc pcie_perf_stats_desc[] = {
- { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
- { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
-};
-
-static const struct counter_desc pcie_perf_stats_desc64[] = {
- { "outbound_pci_buffer_overflow", PCIE_PERF_OFF64(tx_overflow_buffer_pkt) },
-};
-
-static const struct counter_desc pcie_perf_stall_stats_desc[] = {
- { "outbound_pci_stalled_rd", PCIE_PERF_OFF(outbound_stalled_reads) },
- { "outbound_pci_stalled_wr", PCIE_PERF_OFF(outbound_stalled_writes) },
- { "outbound_pci_stalled_rd_events", PCIE_PERF_OFF(outbound_stalled_reads_events) },
- { "outbound_pci_stalled_wr_events", PCIE_PERF_OFF(outbound_stalled_writes_events) },
-};
-
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
@@ -346,6 +155,7 @@ struct mlx5e_rq_stats {
u64 csum_none;
u64 lro_packets;
u64 lro_bytes;
+ u64 removed_vlan_packets;
u64 xdp_drop;
u64 xdp_tx;
u64 xdp_tx_full;
@@ -362,31 +172,6 @@ struct mlx5e_rq_stats {
u64 cache_waive;
};
-static const struct counter_desc rq_stats_desc[] = {
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, packets) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, bytes) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_complete) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_unnecessary_inner) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, csum_none) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_drop) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, xdp_tx_full) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_packets) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, lro_bytes) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, mpwqe_filler) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, buff_alloc_err) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_blks) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cqe_compress_pkts) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, page_reuse) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_reuse) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_full) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_empty) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_busy) },
- { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, cache_waive) },
-};
-
struct mlx5e_sq_stats {
/* commonly accessed in data path */
u64 packets;
@@ -398,6 +183,7 @@ struct mlx5e_sq_stats {
u64 tso_inner_bytes;
u64 csum_partial;
u64 csum_partial_inner;
+ u64 added_vlan_packets;
u64 nop;
/* less likely accessed in data path */
u64 csum_none;
@@ -406,61 +192,6 @@ struct mlx5e_sq_stats {
u64 dropped;
};
-static const struct counter_desc sq_stats_desc[] = {
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, packets) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_packets) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_packets) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, tso_inner_bytes) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_partial_inner) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, nop) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, csum_none) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
- { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
-};
-
-#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
-#define NUM_Q_COUNTERS ARRAY_SIZE(q_stats_desc)
-#define NUM_VPORT_COUNTERS ARRAY_SIZE(vport_stats_desc)
-#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
-#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
-#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
-#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \
- (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \
- MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
-#define NUM_PCIE_PERF_COUNTERS(priv) \
- (ARRAY_SIZE(pcie_perf_stats_desc) * \
- MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
-#define NUM_PCIE_PERF_COUNTERS64(priv) \
- (ARRAY_SIZE(pcie_perf_stats_desc64) * \
- MLX5_CAP_MCAM_FEATURE((priv)->mdev, tx_overflow_buffer_pkt))
-#define NUM_PCIE_PERF_STALL_COUNTERS(priv) \
- (ARRAY_SIZE(pcie_perf_stall_stats_desc) * \
- MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_outbound_stalled))
-#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
- ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
-#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
- ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
-#define NUM_PPORT_ETH_EXT_COUNTERS(priv) \
- (ARRAY_SIZE(pport_eth_ext_stats_desc) * \
- MLX5_CAP_PCAM_FEATURE((priv)->mdev, rx_buffer_fullness_counters))
-#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \
- NUM_PPORT_2863_COUNTERS + \
- NUM_PPORT_2819_COUNTERS + \
- NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \
- NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
- NUM_PPORT_PRIO + \
- NUM_PPORT_ETH_EXT_COUNTERS(priv))
-#define NUM_PCIE_COUNTERS(priv) (NUM_PCIE_PERF_COUNTERS(priv) + \
- NUM_PCIE_PERF_COUNTERS64(priv) +\
- NUM_PCIE_PERF_STALL_COUNTERS(priv))
-#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
-#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
-
struct mlx5e_stats {
struct mlx5e_sw_stats sw;
struct mlx5e_qcounter_stats qcnt;
@@ -470,14 +201,14 @@ struct mlx5e_stats {
struct mlx5e_pcie_stats pcie;
};
-static const struct counter_desc mlx5e_pme_status_desc[] = {
- { "module_unplug", 8 },
+struct mlx5e_priv;
+struct mlx5e_stats_grp {
+ int (*get_num_stats)(struct mlx5e_priv *priv);
+ int (*fill_strings)(struct mlx5e_priv *priv, u8 *data, int idx);
+ int (*fill_stats)(struct mlx5e_priv *priv, u64 *data, int idx);
};
-static const struct counter_desc mlx5e_pme_error_desc[] = {
- { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */
- { "module_high_temp", 48 }, /* high temperature */
- { "module_bad_shorted", 56 }, /* bad or shorted cable/module */
-};
+extern const struct mlx5e_stats_grp mlx5e_stats_grps[];
+extern const int mlx5e_num_stats_grps;
#endif /* __MLX5_EN_STATS_H__ */
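[Illustrative note, not part of the patch] The counter_desc tables removed from this header pair an ethtool string with a byte offset into a raw register dump, and the values are read as big-endian 64-bit counters. A minimal sketch of that pattern, assuming a flat 64-bit layout (the real PPCNT layout addresses the _high half-word of each counter); the demo_* names are invented:

#include <endian.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_802_3_layout {
	uint64_t a_frames_transmitted_ok;
	uint64_t a_frames_received_ok;
};

struct demo_counter_desc {
	const char *format;	/* ethtool string */
	size_t offset;		/* byte offset into the raw register dump */
};

static const struct demo_counter_desc demo_desc[] = {
	{ "tx_packets_phy", offsetof(struct demo_802_3_layout, a_frames_transmitted_ok) },
	{ "rx_packets_phy", offsetof(struct demo_802_3_layout, a_frames_received_ok) },
};

static uint64_t read_ctr64_be(const void *buf, size_t off)
{
	uint64_t v;

	memcpy(&v, (const char *)buf + off, sizeof(v));
	return be64toh(v);	/* device keeps counters big-endian */
}

int main(void)
{
	struct demo_802_3_layout regs = {
		.a_frames_transmitted_ok = htobe64(12345),
		.a_frames_received_ok = htobe64(67890),
	};
	size_t i;

	for (i = 0; i < sizeof(demo_desc) / sizeof(demo_desc[0]); i++)
		printf("%s = %llu\n", demo_desc[i].format,
		       (unsigned long long)read_ctr64_be(&regs, demo_desc[i].offset));
	return 0;
}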
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 9ba1f720..55979ec 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -90,8 +90,8 @@ enum {
MLX5_HEADER_TYPE_NVGRE = 0x1,
};
-#define MLX5E_TC_TABLE_NUM_ENTRIES 1024
#define MLX5E_TC_TABLE_NUM_GROUPS 4
+#define MLX5E_TC_TABLE_MAX_GROUP_SIZE (1 << 16)
struct mod_hdr_key {
int num_actions;
@@ -263,10 +263,21 @@ mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv,
}
if (IS_ERR_OR_NULL(priv->fs.tc.t)) {
+ int tc_grp_size, tc_tbl_size;
+ u32 max_flow_counter;
+
+ max_flow_counter = (MLX5_CAP_GEN(dev, max_flow_counter_31_16) << 16) |
+ MLX5_CAP_GEN(dev, max_flow_counter_15_0);
+
+ tc_grp_size = min_t(int, max_flow_counter, MLX5E_TC_TABLE_MAX_GROUP_SIZE);
+
+ tc_tbl_size = min_t(int, tc_grp_size * MLX5E_TC_TABLE_NUM_GROUPS,
+ BIT(MLX5_CAP_FLOWTABLE_NIC_RX(dev, log_max_ft_size)));
+
priv->fs.tc.t =
mlx5_create_auto_grouped_flow_table(priv->fs.ns,
MLX5E_TC_PRIO,
- MLX5E_TC_TABLE_NUM_ENTRIES,
+ tc_tbl_size,
MLX5E_TC_TABLE_NUM_GROUPS,
0, 0);
if (IS_ERR(priv->fs.tc.t)) {
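[Illustrative note, not part of the patch] The en_tc.c hunk above replaces the fixed 1024-entry TC table with a size derived from device capabilities: the group size is clamped to the number of flow counters, and the table to the device's maximum flow-table size. A small standalone sketch of that arithmetic with hypothetical capability values:

#include <stdio.h>

#define TC_TABLE_NUM_GROUPS	4
#define TC_TABLE_MAX_GROUP_SIZE	(1 << 16)

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
	/* hypothetical capability values, not read from any device */
	unsigned int max_flow_counter = (0x0001u << 16) | 0x8000u;	/* 98304 counters */
	int log_max_ft_size = 17;					/* max 131072 entries */
	int tc_grp_size, tc_tbl_size;

	tc_grp_size = min_int(max_flow_counter, TC_TABLE_MAX_GROUP_SIZE);
	tc_tbl_size = min_int(tc_grp_size * TC_TABLE_NUM_GROUPS,
			      1 << log_max_ft_size);

	printf("group size %d, table size %d\n", tc_grp_size, tc_tbl_size);
	return 0;
}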
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index 1d6925d..569b42a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -32,9 +32,11 @@
#include <linux/tcp.h>
#include <linux/if_vlan.h>
+#include <net/dsfield.h>
#include "en.h"
#include "ipoib/ipoib.h"
#include "en_accel/ipsec_rxtx.h"
+#include "lib/clock.h"
#define MLX5E_SQ_NOPS_ROOM MLX5_SEND_WQE_MAX_WQEBBS
#define MLX5E_SQ_STOP_ROOM (MLX5_SEND_WQE_MAX_WQEBBS +\
@@ -85,6 +87,20 @@ static void mlx5e_dma_unmap_wqe_err(struct mlx5e_txqsq *sq, u8 num_dma)
}
}
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+static inline int mlx5e_get_dscp_up(struct mlx5e_priv *priv, struct sk_buff *skb)
+{
+ int dscp_cp = 0;
+
+ if (skb->protocol == htons(ETH_P_IP))
+ dscp_cp = ipv4_get_dsfield(ip_hdr(skb)) >> 2;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ dscp_cp = ipv6_get_dsfield(ipv6_hdr(skb)) >> 2;
+
+ return priv->dcbx_dp.dscp2prio[dscp_cp];
+}
+#endif
+
u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
@@ -96,8 +112,13 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
if (!netdev_get_num_tc(dev))
return channel_ix;
- if (skb_vlan_tag_present(skb))
- up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
+#ifdef CONFIG_MLX5_CORE_EN_DCB
+ if (priv->dcbx_dp.trust_state == MLX5_QPTS_TRUST_DSCP)
+ up = mlx5e_get_dscp_up(priv, skb);
+ else
+#endif
+ if (skb_vlan_tag_present(skb))
+ up = skb->vlan_tci >> VLAN_PRIO_SHIFT;
/* channel_ix can be larger than num_channels since
* dev->num_real_tx_queues = num_channels * num_tc
@@ -340,6 +361,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
if (skb_vlan_tag_present(skb)) {
mlx5e_insert_vlan(eseg->inline_hdr.start, skb, ihs, &skb_data, &skb_len);
ihs += VLAN_HLEN;
+ sq->stats.added_vlan_packets++;
} else {
memcpy(eseg->inline_hdr.start, skb_data, ihs);
mlx5e_tx_skb_pull_inline(&skb_data, &skb_len, ihs);
@@ -348,7 +370,10 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_txqsq *sq, struct sk_buff *skb,
ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr.start), MLX5_SEND_WQE_DS);
} else if (skb_vlan_tag_present(skb)) {
eseg->insert.type = cpu_to_be16(MLX5_ETH_WQE_INSERT_VLAN);
+ if (skb->vlan_proto == cpu_to_be16(ETH_P_8021AD))
+ eseg->insert.type |= cpu_to_be16(MLX5_ETH_WQE_SVLAN);
eseg->insert.vlan_tci = cpu_to_be16(skb_vlan_tag_get(skb));
+ sq->stats.added_vlan_packets++;
}
headlen = skb_len - skb->data_len;
@@ -452,8 +477,9 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
SKBTX_HW_TSTAMP)) {
struct skb_shared_hwtstamps hwts = {};
- mlx5e_fill_hwstamp(sq->tstamp,
- get_cqe_ts(cqe), &hwts);
+ hwts.hwtstamp =
+ mlx5_timecounter_cyc2time(sq->clock,
+ get_cqe_ts(cqe));
skb_tstamp_tx(skb, &hwts);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index fc606bf..6077186 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -491,8 +491,7 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
break;
case MLX5_EVENT_TYPE_PPS_EVENT:
- if (dev->event)
- dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
+ mlx5_pps_event(dev, eqe);
break;
case MLX5_EVENT_TYPE_FPGA_ERROR:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index c77f4c0..bbb140f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -157,7 +157,7 @@ __esw_fdb_set_vport_rule(struct mlx5_eswitch *esw, u32 vport, bool rx_rule,
MLX5_MATCH_OUTER_HEADERS);
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_spec *spec;
void *mv_misc = NULL;
void *mc_misc = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index d9fd857..1143d80 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -306,7 +306,7 @@ static struct mlx5_flow_handle *
mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn)
{
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
@@ -395,7 +395,7 @@ out_err:
static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw)
{
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule = NULL;
struct mlx5_flow_spec *spec;
int err = 0;
@@ -670,7 +670,7 @@ struct mlx5_flow_handle *
mlx5_eswitch_create_vport_rx_rule(struct mlx5_eswitch *esw, int vport, u32 tirn)
{
struct mlx5_flow_act flow_act = {0};
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_handle *flow_rule;
struct mlx5_flow_spec *spec;
void *misc;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 36ecc2b..881e2e5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -40,7 +40,8 @@
#include "eswitch.h"
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft, u32 underlay_qpn)
+ struct mlx5_flow_table *ft, u32 underlay_qpn,
+ bool disconnect)
{
u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {0};
u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {0};
@@ -52,7 +53,15 @@ int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
MLX5_SET(set_flow_table_root_in, in, opcode,
MLX5_CMD_OP_SET_FLOW_TABLE_ROOT);
MLX5_SET(set_flow_table_root_in, in, table_type, ft->type);
- MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
+
+ if (disconnect) {
+ MLX5_SET(set_flow_table_root_in, in, op_mod, 1);
+ MLX5_SET(set_flow_table_root_in, in, table_id, 0);
+ } else {
+ MLX5_SET(set_flow_table_root_in, in, op_mod, 0);
+ MLX5_SET(set_flow_table_root_in, in, table_id, ft->id);
+ }
+
MLX5_SET(set_flow_table_root_in, in, underlay_qpn, underlay_qpn);
if (ft->vport) {
MLX5_SET(set_flow_table_root_in, in, vport_number, ft->vport);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
index c6d7bdf..71e2d0f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.h
@@ -71,8 +71,8 @@ int mlx5_cmd_delete_fte(struct mlx5_core_dev *dev,
unsigned int index);
int mlx5_cmd_update_root_ft(struct mlx5_core_dev *dev,
- struct mlx5_flow_table *ft,
- u32 underlay_qpn);
+ struct mlx5_flow_table *ft, u32 underlay_qpn,
+ bool disconnect);
int mlx5_cmd_fc_alloc(struct mlx5_core_dev *dev, u32 *id);
int mlx5_cmd_fc_free(struct mlx5_core_dev *dev, u32 id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5a7bea6..c70fd66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -145,10 +145,10 @@ static struct init_tree_node {
}
};
-enum fs_i_mutex_lock_class {
- FS_MUTEX_GRANDPARENT,
- FS_MUTEX_PARENT,
- FS_MUTEX_CHILD
+enum fs_i_lock_class {
+ FS_LOCK_GRANDPARENT,
+ FS_LOCK_PARENT,
+ FS_LOCK_CHILD
};
static const struct rhashtable_params rhash_fte = {
@@ -168,10 +168,16 @@ static const struct rhashtable_params rhash_fg = {
};
-static void del_rule(struct fs_node *node);
-static void del_flow_table(struct fs_node *node);
-static void del_flow_group(struct fs_node *node);
-static void del_fte(struct fs_node *node);
+static void del_hw_flow_table(struct fs_node *node);
+static void del_hw_flow_group(struct fs_node *node);
+static void del_hw_fte(struct fs_node *node);
+static void del_sw_flow_table(struct fs_node *node);
+static void del_sw_flow_group(struct fs_node *node);
+static void del_sw_fte(struct fs_node *node);
+/* Delete rule (destination) is special case that
+ * requires to lock the FTE for all the deletion process.
+ */
+static void del_sw_hw_rule(struct fs_node *node);
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
struct mlx5_flow_destination *d2);
static struct mlx5_flow_rule *
@@ -179,20 +185,22 @@ find_flow_rule(struct fs_fte *fte,
struct mlx5_flow_destination *dest);
static void tree_init_node(struct fs_node *node,
- unsigned int refcount,
- void (*remove_func)(struct fs_node *))
+ void (*del_hw_func)(struct fs_node *),
+ void (*del_sw_func)(struct fs_node *))
{
- atomic_set(&node->refcount, refcount);
+ refcount_set(&node->refcount, 1);
INIT_LIST_HEAD(&node->list);
INIT_LIST_HEAD(&node->children);
- mutex_init(&node->lock);
- node->remove_func = remove_func;
+ init_rwsem(&node->lock);
+ node->del_hw_func = del_hw_func;
+ node->del_sw_func = del_sw_func;
+ node->active = false;
}
static void tree_add_node(struct fs_node *node, struct fs_node *parent)
{
if (parent)
- atomic_inc(&parent->refcount);
+ refcount_inc(&parent->refcount);
node->parent = parent;
/* Parent is the root */
@@ -202,58 +210,78 @@ static void tree_add_node(struct fs_node *node, struct fs_node *parent)
node->root = parent->root;
}
-static void tree_get_node(struct fs_node *node)
+static int tree_get_node(struct fs_node *node)
{
- atomic_inc(&node->refcount);
+ return refcount_inc_not_zero(&node->refcount);
}
-static void nested_lock_ref_node(struct fs_node *node,
- enum fs_i_mutex_lock_class class)
+static void nested_down_read_ref_node(struct fs_node *node,
+ enum fs_i_lock_class class)
{
if (node) {
- mutex_lock_nested(&node->lock, class);
- atomic_inc(&node->refcount);
+ down_read_nested(&node->lock, class);
+ refcount_inc(&node->refcount);
}
}
-static void lock_ref_node(struct fs_node *node)
+static void nested_down_write_ref_node(struct fs_node *node,
+ enum fs_i_lock_class class)
{
if (node) {
- mutex_lock(&node->lock);
- atomic_inc(&node->refcount);
+ down_write_nested(&node->lock, class);
+ refcount_inc(&node->refcount);
}
}
-static void unlock_ref_node(struct fs_node *node)
+static void down_write_ref_node(struct fs_node *node)
{
if (node) {
- atomic_dec(&node->refcount);
- mutex_unlock(&node->lock);
+ down_write(&node->lock);
+ refcount_inc(&node->refcount);
}
}
+static void up_read_ref_node(struct fs_node *node)
+{
+ refcount_dec(&node->refcount);
+ up_read(&node->lock);
+}
+
+static void up_write_ref_node(struct fs_node *node)
+{
+ refcount_dec(&node->refcount);
+ up_write(&node->lock);
+}
+
static void tree_put_node(struct fs_node *node)
{
struct fs_node *parent_node = node->parent;
- lock_ref_node(parent_node);
- if (atomic_dec_and_test(&node->refcount)) {
- if (parent_node)
+ if (refcount_dec_and_test(&node->refcount)) {
+ if (node->del_hw_func)
+ node->del_hw_func(node);
+ if (parent_node) {
+ /* Only root namespace doesn't have parent and we just
+ * need to free its node.
+ */
+ down_write_ref_node(parent_node);
list_del_init(&node->list);
- if (node->remove_func)
- node->remove_func(node);
- kfree(node);
+ if (node->del_sw_func)
+ node->del_sw_func(node);
+ up_write_ref_node(parent_node);
+ } else {
+ kfree(node);
+ }
node = NULL;
}
- unlock_ref_node(parent_node);
if (!node && parent_node)
tree_put_node(parent_node);
}
static int tree_remove_node(struct fs_node *node)
{
- if (atomic_read(&node->refcount) > 1) {
- atomic_dec(&node->refcount);
+ if (refcount_read(&node->refcount) > 1) {
+ refcount_dec(&node->refcount);
return -EEXIST;
}
tree_put_node(node);
@@ -362,6 +390,15 @@ static struct mlx5_flow_root_namespace *find_root(struct fs_node *node)
return container_of(ns, struct mlx5_flow_root_namespace, ns);
}
+static inline struct mlx5_flow_steering *get_steering(struct fs_node *node)
+{
+ struct mlx5_flow_root_namespace *root = find_root(node);
+
+ if (root)
+ return root->dev->priv.steering;
+ return NULL;
+}
+
static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
{
struct mlx5_flow_root_namespace *root = find_root(node);
@@ -371,26 +408,36 @@ static inline struct mlx5_core_dev *get_dev(struct fs_node *node)
return NULL;
}
-static void del_flow_table(struct fs_node *node)
+static void del_hw_flow_table(struct fs_node *node)
{
struct mlx5_flow_table *ft;
struct mlx5_core_dev *dev;
- struct fs_prio *prio;
int err;
fs_get_obj(ft, node);
dev = get_dev(&ft->node);
- err = mlx5_cmd_destroy_flow_table(dev, ft);
- if (err)
- mlx5_core_warn(dev, "flow steering can't destroy ft\n");
- ida_destroy(&ft->fte_allocator);
+ if (node->active) {
+ err = mlx5_cmd_destroy_flow_table(dev, ft);
+ if (err)
+ mlx5_core_warn(dev, "flow steering can't destroy ft\n");
+ }
+}
+
+static void del_sw_flow_table(struct fs_node *node)
+{
+ struct mlx5_flow_table *ft;
+ struct fs_prio *prio;
+
+ fs_get_obj(ft, node);
+
rhltable_destroy(&ft->fgs_hash);
fs_get_obj(prio, ft->node.parent);
prio->num_ft--;
+ kfree(ft);
}
-static void del_rule(struct fs_node *node)
+static void del_sw_hw_rule(struct fs_node *node)
{
struct mlx5_flow_rule *rule;
struct mlx5_flow_table *ft;
@@ -406,7 +453,6 @@ static void del_rule(struct fs_node *node)
fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
trace_mlx5_fs_del_rule(rule);
- list_del(&rule->node.list);
if (rule->sw_action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
mutex_lock(&rule->dest_attr.ft->lock);
list_del(&rule->next_ft);
@@ -434,117 +480,203 @@ out:
"%s can't del rule fg id=%d fte_index=%d\n",
__func__, fg->id, fte->index);
}
+ kfree(rule);
}
-static void destroy_fte(struct fs_fte *fte, struct mlx5_flow_group *fg)
+static void del_hw_fte(struct fs_node *node)
{
struct mlx5_flow_table *ft;
- int ret;
+ struct mlx5_flow_group *fg;
+ struct mlx5_core_dev *dev;
+ struct fs_fte *fte;
+ int err;
- ret = rhashtable_remove_fast(&fg->ftes_hash, &fte->hash, rhash_fte);
- WARN_ON(ret);
- fte->status = 0;
+ fs_get_obj(fte, node);
+ fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
- ida_simple_remove(&ft->fte_allocator, fte->index);
+
+ trace_mlx5_fs_del_fte(fte);
+ dev = get_dev(&ft->node);
+ if (node->active) {
+ err = mlx5_cmd_delete_fte(dev, ft,
+ fte->index);
+ if (err)
+ mlx5_core_warn(dev,
+ "flow steering can't delete fte in index %d of flow group id %d\n",
+ fte->index, fg->id);
+ }
}
-static void del_fte(struct fs_node *node)
+static void del_sw_fte(struct fs_node *node)
{
- struct mlx5_flow_table *ft;
+ struct mlx5_flow_steering *steering = get_steering(node);
struct mlx5_flow_group *fg;
- struct mlx5_core_dev *dev;
struct fs_fte *fte;
int err;
fs_get_obj(fte, node);
fs_get_obj(fg, fte->node.parent);
- fs_get_obj(ft, fg->node.parent);
- trace_mlx5_fs_del_fte(fte);
- dev = get_dev(&ft->node);
- err = mlx5_cmd_delete_fte(dev, ft,
- fte->index);
- if (err)
- mlx5_core_warn(dev,
- "flow steering can't delete fte in index %d of flow group id %d\n",
- fte->index, fg->id);
-
- destroy_fte(fte, fg);
+ err = rhashtable_remove_fast(&fg->ftes_hash,
+ &fte->hash,
+ rhash_fte);
+ WARN_ON(err);
+ ida_simple_remove(&fg->fte_allocator, fte->index - fg->start_index);
+ kmem_cache_free(steering->ftes_cache, fte);
}
-static void del_flow_group(struct fs_node *node)
+static void del_hw_flow_group(struct fs_node *node)
{
struct mlx5_flow_group *fg;
struct mlx5_flow_table *ft;
struct mlx5_core_dev *dev;
- int err;
fs_get_obj(fg, node);
fs_get_obj(ft, fg->node.parent);
dev = get_dev(&ft->node);
trace_mlx5_fs_del_fg(fg);
- if (ft->autogroup.active)
- ft->autogroup.num_groups--;
+ if (fg->node.active && mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
+ mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
+ fg->id, ft->id);
+}
+
+static void del_sw_flow_group(struct fs_node *node)
+{
+ struct mlx5_flow_steering *steering = get_steering(node);
+ struct mlx5_flow_group *fg;
+ struct mlx5_flow_table *ft;
+ int err;
+
+ fs_get_obj(fg, node);
+ fs_get_obj(ft, fg->node.parent);
rhashtable_destroy(&fg->ftes_hash);
+ ida_destroy(&fg->fte_allocator);
+ if (ft->autogroup.active)
+ ft->autogroup.num_groups--;
err = rhltable_remove(&ft->fgs_hash,
&fg->hash,
rhash_fg);
WARN_ON(err);
- if (mlx5_cmd_destroy_flow_group(dev, ft, fg->id))
- mlx5_core_warn(dev, "flow steering can't destroy fg %d of ft %d\n",
- fg->id, ft->id);
+ kmem_cache_free(steering->fgs_cache, fg);
+}
+
+static int insert_fte(struct mlx5_flow_group *fg, struct fs_fte *fte)
+{
+ int index;
+ int ret;
+
+ index = ida_simple_get(&fg->fte_allocator, 0, fg->max_ftes, GFP_KERNEL);
+ if (index < 0)
+ return index;
+
+ fte->index = index + fg->start_index;
+ ret = rhashtable_insert_fast(&fg->ftes_hash,
+ &fte->hash,
+ rhash_fte);
+ if (ret)
+ goto err_ida_remove;
+
+ tree_add_node(&fte->node, &fg->node);
+ list_add_tail(&fte->node.list, &fg->node.children);
+ return 0;
+
+err_ida_remove:
+ ida_simple_remove(&fg->fte_allocator, index);
+ return ret;
}
-static struct fs_fte *alloc_fte(struct mlx5_flow_act *flow_act,
+static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft,
u32 *match_value,
- unsigned int index)
+ struct mlx5_flow_act *flow_act)
{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
struct fs_fte *fte;
- fte = kzalloc(sizeof(*fte), GFP_KERNEL);
+ fte = kmem_cache_zalloc(steering->ftes_cache, GFP_KERNEL);
if (!fte)
return ERR_PTR(-ENOMEM);
memcpy(fte->val, match_value, sizeof(fte->val));
fte->node.type = FS_TYPE_FLOW_ENTRY;
fte->flow_tag = flow_act->flow_tag;
- fte->index = index;
fte->action = flow_act->action;
fte->encap_id = flow_act->encap_id;
fte->modify_id = flow_act->modify_id;
+ tree_init_node(&fte->node, del_hw_fte, del_sw_fte);
+
return fte;
}
-static struct mlx5_flow_group *alloc_flow_group(u32 *create_fg_in)
+static void dealloc_flow_group(struct mlx5_flow_steering *steering,
+ struct mlx5_flow_group *fg)
+{
+ rhashtable_destroy(&fg->ftes_hash);
+ kmem_cache_free(steering->fgs_cache, fg);
+}
+
+static struct mlx5_flow_group *alloc_flow_group(struct mlx5_flow_steering *steering,
+ u8 match_criteria_enable,
+ void *match_criteria,
+ int start_index,
+ int end_index)
{
struct mlx5_flow_group *fg;
- void *match_criteria = MLX5_ADDR_OF(create_flow_group_in,
- create_fg_in, match_criteria);
- u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
- create_fg_in,
- match_criteria_enable);
int ret;
- fg = kzalloc(sizeof(*fg), GFP_KERNEL);
+ fg = kmem_cache_zalloc(steering->fgs_cache, GFP_KERNEL);
if (!fg)
return ERR_PTR(-ENOMEM);
ret = rhashtable_init(&fg->ftes_hash, &rhash_fte);
if (ret) {
- kfree(fg);
+ kmem_cache_free(steering->fgs_cache, fg);
return ERR_PTR(ret);
}
+ ida_init(&fg->fte_allocator);
fg->mask.match_criteria_enable = match_criteria_enable;
memcpy(&fg->mask.match_criteria, match_criteria,
sizeof(fg->mask.match_criteria));
fg->node.type = FS_TYPE_FLOW_GROUP;
- fg->start_index = MLX5_GET(create_flow_group_in, create_fg_in,
- start_flow_index);
- fg->max_ftes = MLX5_GET(create_flow_group_in, create_fg_in,
- end_flow_index) - fg->start_index + 1;
+ fg->start_index = start_index;
+ fg->max_ftes = end_index - start_index + 1;
+
+ return fg;
+}
+
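+/* Allocate a flow group and link it into the table's group hash, tree and child list; no FW command is issued here. */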
+static struct mlx5_flow_group *alloc_insert_flow_group(struct mlx5_flow_table *ft,
+ u8 match_criteria_enable,
+ void *match_criteria,
+ int start_index,
+ int end_index,
+ struct list_head *prev)
+{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_group *fg;
+ int ret;
+
+ fg = alloc_flow_group(steering, match_criteria_enable, match_criteria,
+ start_index, end_index);
+ if (IS_ERR(fg))
+ return fg;
+
+ /* initialize refcnt, add to parent list */
+ ret = rhltable_insert(&ft->fgs_hash,
+ &fg->hash,
+ rhash_fg);
+ if (ret) {
+ dealloc_flow_group(steering, fg);
+ return ERR_PTR(ret);
+ }
+
+ tree_init_node(&fg->node, del_hw_flow_group, del_sw_flow_group);
+ tree_add_node(&fg->node, &ft->node);
+ /* Add node to group list */
+ list_add(&fg->node.list, prev);
+ atomic_inc(&ft->node.version);
+
return fg;
}
@@ -575,7 +707,6 @@ static struct mlx5_flow_table *alloc_flow_table(int level, u16 vport, int max_ft
ft->flags = flags;
INIT_LIST_HEAD(&ft->fwd_rules);
mutex_init(&ft->lock);
- ida_init(&ft->fte_allocator);
return ft;
}
@@ -693,8 +824,10 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
*prio)
{
struct mlx5_flow_root_namespace *root = find_root(&prio->node);
+ struct mlx5_ft_underlay_qp *uqp;
int min_level = INT_MAX;
int err;
+ u32 qpn;
if (root->root_ft)
min_level = root->root_ft->level;
@@ -702,10 +835,24 @@ static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio
if (ft->level >= min_level)
return 0;
- err = mlx5_cmd_update_root_ft(root->dev, ft, root->underlay_qpn);
+ if (list_empty(&root->underlay_qpns)) {
+ /* Don't set any QPN (zero) in case QPN list is empty */
+ qpn = 0;
+ err = mlx5_cmd_update_root_ft(root->dev, ft, qpn, false);
+ } else {
+ list_for_each_entry(uqp, &root->underlay_qpns, list) {
+ qpn = uqp->qpn;
+ err = mlx5_cmd_update_root_ft(root->dev, ft, qpn,
+ false);
+ if (err)
+ break;
+ }
+ }
+
if (err)
- mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
- ft->id);
+ mlx5_core_warn(root->dev,
+ "Update root flow table of id(%u) qpn(%d) failed\n",
+ ft->id, qpn);
else
root->root_ft = ft;
@@ -724,7 +871,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
fs_get_obj(fte, rule->node.parent);
if (!(fte->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST))
return -EINVAL;
- lock_ref_node(&fte->node);
+ down_write_ref_node(&fte->node);
fs_get_obj(fg, fte->node.parent);
fs_get_obj(ft, fg->node.parent);
@@ -733,7 +880,7 @@ static int _mlx5_modify_rule_destination(struct mlx5_flow_rule *rule,
ft, fg->id,
modify_mask,
fte);
- unlock_ref_node(&fte->node);
+ up_write_ref_node(&fte->node);
return err;
}
@@ -765,7 +912,7 @@ static int connect_fwd_rules(struct mlx5_core_dev *dev,
struct mlx5_flow_table *new_next_ft,
struct mlx5_flow_table *old_next_ft)
{
- struct mlx5_flow_destination dest;
+ struct mlx5_flow_destination dest = {};
struct mlx5_flow_rule *iter;
int err = 0;
@@ -870,7 +1017,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
goto unlock_root;
}
- tree_init_node(&ft->node, 1, del_flow_table);
+ tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table);
log_table_sz = ft->max_fte ? ilog2(ft->max_fte) : 0;
next_ft = find_next_chained_ft(fs_prio);
err = mlx5_cmd_create_flow_table(root->dev, ft->vport, ft->op_mod, ft->type,
@@ -882,17 +1029,17 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa
err = connect_flow_table(root->dev, ft, fs_prio);
if (err)
goto destroy_ft;
- lock_ref_node(&fs_prio->node);
+ ft->node.active = true;
+ down_write_ref_node(&fs_prio->node);
tree_add_node(&ft->node, &fs_prio->node);
list_add_flow_table(ft, fs_prio);
fs_prio->num_ft++;
- unlock_ref_node(&fs_prio->node);
+ up_write_ref_node(&fs_prio->node);
mutex_unlock(&root->chain_lock);
return ft;
destroy_ft:
mlx5_cmd_destroy_flow_table(root->dev, ft);
free_ft:
- ida_destroy(&ft->fte_allocator);
kfree(ft);
unlock_root:
mutex_unlock(&root->chain_lock);
@@ -960,54 +1107,6 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
}
EXPORT_SYMBOL(mlx5_create_auto_grouped_flow_table);
-/* Flow table should be locked */
-static struct mlx5_flow_group *create_flow_group_common(struct mlx5_flow_table *ft,
- u32 *fg_in,
- struct list_head
- *prev_fg,
- bool is_auto_fg)
-{
- struct mlx5_flow_group *fg;
- struct mlx5_core_dev *dev = get_dev(&ft->node);
- int err;
-
- if (!dev)
- return ERR_PTR(-ENODEV);
-
- fg = alloc_flow_group(fg_in);
- if (IS_ERR(fg))
- return fg;
-
- err = rhltable_insert(&ft->fgs_hash, &fg->hash, rhash_fg);
- if (err)
- goto err_free_fg;
-
- err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
- if (err)
- goto err_remove_fg;
-
- if (ft->autogroup.active)
- ft->autogroup.num_groups++;
- /* Add node to tree */
- tree_init_node(&fg->node, !is_auto_fg, del_flow_group);
- tree_add_node(&fg->node, &ft->node);
- /* Add node to group list */
- list_add(&fg->node.list, prev_fg);
-
- trace_mlx5_fs_add_fg(fg);
- return fg;
-
-err_remove_fg:
- WARN_ON(rhltable_remove(&ft->fgs_hash,
- &fg->hash,
- rhash_fg));
-err_free_fg:
- rhashtable_destroy(&fg->ftes_hash);
- kfree(fg);
-
- return ERR_PTR(err);
-}
-
struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
u32 *fg_in)
{
@@ -1016,7 +1115,13 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
u8 match_criteria_enable = MLX5_GET(create_flow_group_in,
fg_in,
match_criteria_enable);
+ int start_index = MLX5_GET(create_flow_group_in, fg_in,
+ start_flow_index);
+ int end_index = MLX5_GET(create_flow_group_in, fg_in,
+ end_flow_index);
+ struct mlx5_core_dev *dev = get_dev(&ft->node);
struct mlx5_flow_group *fg;
+ int err;
if (!check_valid_mask(match_criteria_enable, match_criteria))
return ERR_PTR(-EINVAL);
@@ -1024,9 +1129,21 @@ struct mlx5_flow_group *mlx5_create_flow_group(struct mlx5_flow_table *ft,
if (ft->autogroup.active)
return ERR_PTR(-EPERM);
- lock_ref_node(&ft->node);
- fg = create_flow_group_common(ft, fg_in, ft->node.children.prev, false);
- unlock_ref_node(&ft->node);
+ down_write_ref_node(&ft->node);
+ fg = alloc_insert_flow_group(ft, match_criteria_enable, match_criteria,
+ start_index, end_index,
+ ft->node.children.prev);
+ up_write_ref_node(&ft->node);
+ if (IS_ERR(fg))
+ return fg;
+
+ err = mlx5_cmd_create_flow_group(dev, ft, fg_in, &fg->id);
+ if (err) {
+ tree_put_node(&fg->node);
+ return ERR_PTR(err);
+ }
+ trace_mlx5_fs_add_fg(fg);
+ fg->node.active = true;
return fg;
}
@@ -1067,7 +1184,7 @@ static void destroy_flow_handle(struct fs_fte *fte,
int i)
{
for (; --i >= 0;) {
- if (atomic_dec_and_test(&handle->rule[i]->node.refcount)) {
+ if (refcount_dec_and_test(&handle->rule[i]->node.refcount)) {
fte->dests_size--;
list_del(&handle->rule[i]->node.list);
kfree(handle->rule[i]);
@@ -1098,7 +1215,7 @@ create_flow_handle(struct fs_fte *fte,
if (dest) {
rule = find_flow_rule(fte, dest + i);
if (rule) {
- atomic_inc(&rule->node.refcount);
+ refcount_inc(&rule->node.refcount);
goto rule_found;
}
}
@@ -1111,7 +1228,7 @@ create_flow_handle(struct fs_fte *fte,
/* Add dest to dests list- we need flow tables to be in the
* end of the list for forward to next prio rules.
*/
- tree_init_node(&rule->node, 1, del_rule);
+ tree_init_node(&rule->node, NULL, del_sw_hw_rule);
if (dest &&
dest[i].type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE)
list_add(&rule->node.list, &fte->node.children);
@@ -1167,7 +1284,9 @@ add_rule_fte(struct fs_fte *fte,
if (err)
goto free_handle;
+ fte->node.active = true;
fte->status |= FS_FTE_STATUS_EXISTING;
+ atomic_inc(&fte->node.version);
out:
return handle;
@@ -1177,59 +1296,17 @@ free_handle:
return ERR_PTR(err);
}
-static struct fs_fte *create_fte(struct mlx5_flow_group *fg,
- u32 *match_value,
- struct mlx5_flow_act *flow_act)
-{
- struct mlx5_flow_table *ft;
- struct fs_fte *fte;
- int index;
- int ret;
-
- fs_get_obj(ft, fg->node.parent);
- index = ida_simple_get(&ft->fte_allocator, fg->start_index,
- fg->start_index + fg->max_ftes,
- GFP_KERNEL);
- if (index < 0)
- return ERR_PTR(index);
-
- fte = alloc_fte(flow_act, match_value, index);
- if (IS_ERR(fte)) {
- ret = PTR_ERR(fte);
- goto err_alloc;
- }
- ret = rhashtable_insert_fast(&fg->ftes_hash, &fte->hash, rhash_fte);
- if (ret)
- goto err_hash;
-
- return fte;
-
-err_hash:
- kfree(fte);
-err_alloc:
- ida_simple_remove(&ft->fte_allocator, index);
- return ERR_PTR(ret);
-}
-
-static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria)
+static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec)
{
- int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct list_head *prev = &ft->node.children;
- unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
- void *match_criteria_addr;
+ unsigned int candidate_index = 0;
unsigned int group_size = 0;
- u32 *in;
if (!ft->autogroup.active)
return ERR_PTR(-ENOENT);
- in = kvzalloc(inlen, GFP_KERNEL);
- if (!in)
- return ERR_PTR(-ENOMEM);
-
if (ft->autogroup.num_groups < ft->autogroup.required_groups)
/* We save place for flow groups in addition to max types */
group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
@@ -1247,25 +1324,55 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
prev = &fg->node.list;
}
- if (candidate_index + group_size > ft->max_fte) {
- fg = ERR_PTR(-ENOSPC);
+ if (candidate_index + group_size > ft->max_fte)
+ return ERR_PTR(-ENOSPC);
+
+ fg = alloc_insert_flow_group(ft,
+ spec->match_criteria_enable,
+ spec->match_criteria,
+ candidate_index,
+ candidate_index + group_size - 1,
+ prev);
+ if (IS_ERR(fg))
goto out;
- }
+
+ ft->autogroup.num_groups++;
+
+out:
+ return fg;
+}
+
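+/* Issue the FW command for an autogroup whose index range and match mask were already chosen by alloc_auto_flow_group(). */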
+static int create_auto_flow_group(struct mlx5_flow_table *ft,
+ struct mlx5_flow_group *fg)
+{
+ struct mlx5_core_dev *dev = get_dev(&ft->node);
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ void *match_criteria_addr;
+ int err;
+ u32 *in;
+
+ in = kvzalloc(inlen, GFP_KERNEL);
+ if (!in)
+ return -ENOMEM;
MLX5_SET(create_flow_group_in, in, match_criteria_enable,
- match_criteria_enable);
- MLX5_SET(create_flow_group_in, in, start_flow_index, candidate_index);
- MLX5_SET(create_flow_group_in, in, end_flow_index, candidate_index +
- group_size - 1);
+ fg->mask.match_criteria_enable);
+ MLX5_SET(create_flow_group_in, in, start_flow_index, fg->start_index);
+ MLX5_SET(create_flow_group_in, in, end_flow_index, fg->start_index +
+ fg->max_ftes - 1);
match_criteria_addr = MLX5_ADDR_OF(create_flow_group_in,
in, match_criteria);
- memcpy(match_criteria_addr, match_criteria,
- MLX5_ST_SZ_BYTES(fte_match_param));
+ memcpy(match_criteria_addr, fg->mask.match_criteria,
+ sizeof(fg->mask.match_criteria));
+
+ err = mlx5_cmd_create_flow_group(dev, ft, in, &fg->id);
+ if (!err) {
+ fg->node.active = true;
+ trace_mlx5_fs_add_fg(fg);
+ }
- fg = create_flow_group_common(ft, in, prev, true);
-out:
kvfree(in);
- return fg;
+ return err;
}
static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1,
@@ -1340,60 +1447,30 @@ static struct mlx5_flow_handle *add_rule_fg(struct mlx5_flow_group *fg,
struct fs_fte *fte)
{
struct mlx5_flow_handle *handle;
- struct mlx5_flow_table *ft;
+ int old_action;
int i;
+ int ret;
- if (fte) {
- int old_action;
- int ret;
-
- nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
- ret = check_conflicting_ftes(fte, flow_act);
- if (ret) {
- handle = ERR_PTR(ret);
- goto unlock_fte;
- }
-
- old_action = fte->action;
- fte->action |= flow_act->action;
- handle = add_rule_fte(fte, fg, dest, dest_num,
- old_action != flow_act->action);
- if (IS_ERR(handle)) {
- fte->action = old_action;
- goto unlock_fte;
- } else {
- trace_mlx5_fs_set_fte(fte, false);
- goto add_rules;
- }
- }
- fs_get_obj(ft, fg->node.parent);
+ ret = check_conflicting_ftes(fte, flow_act);
+ if (ret)
+ return ERR_PTR(ret);
- fte = create_fte(fg, match_value, flow_act);
- if (IS_ERR(fte))
- return (void *)fte;
- tree_init_node(&fte->node, 0, del_fte);
- nested_lock_ref_node(&fte->node, FS_MUTEX_CHILD);
- handle = add_rule_fte(fte, fg, dest, dest_num, false);
+ old_action = fte->action;
+ fte->action |= flow_act->action;
+ handle = add_rule_fte(fte, fg, dest, dest_num,
+ old_action != flow_act->action);
if (IS_ERR(handle)) {
- unlock_ref_node(&fte->node);
- destroy_fte(fte, fg);
- kfree(fte);
+ fte->action = old_action;
return handle;
}
+ trace_mlx5_fs_set_fte(fte, false);
- tree_add_node(&fte->node, &fg->node);
- /* fte list isn't sorted */
- list_add_tail(&fte->node.list, &fg->node.children);
- trace_mlx5_fs_set_fte(fte, true);
-add_rules:
for (i = 0; i < handle->num_rules; i++) {
- if (atomic_read(&handle->rule[i]->node.refcount) == 1) {
+ if (refcount_read(&handle->rule[i]->node.refcount) == 1) {
tree_add_node(&handle->rule[i]->node, &fte->node);
trace_mlx5_fs_add_rule(handle->rule[i]);
}
}
-unlock_fte:
- unlock_ref_node(&fte->node);
return handle;
}
@@ -1441,93 +1518,197 @@ static bool dest_is_valid(struct mlx5_flow_destination *dest,
return true;
}
-static struct mlx5_flow_handle *
-try_add_to_existing_fg(struct mlx5_flow_table *ft,
- struct mlx5_flow_spec *spec,
- struct mlx5_flow_act *flow_act,
- struct mlx5_flow_destination *dest,
- int dest_num)
-{
+struct match_list {
+ struct list_head list;
struct mlx5_flow_group *g;
- struct mlx5_flow_handle *rule = ERR_PTR(-ENOENT);
+};
+
+struct match_list_head {
+ struct list_head list;
+ struct match_list first;
+};
+
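+/* Drop the references taken on the matched flow groups and free the heap-allocated list entries. */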
+static void free_match_list(struct match_list_head *head)
+{
+ if (!list_empty(&head->list)) {
+ struct match_list *iter, *match_tmp;
+
+ list_del(&head->first.list);
+ tree_put_node(&head->first.g->node);
+ list_for_each_entry_safe(iter, match_tmp, &head->list,
+ list) {
+ tree_put_node(&iter->g->node);
+ list_del(&iter->list);
+ kfree(iter);
+ }
+ }
+}
+
+static int build_match_list(struct match_list_head *match_head,
+ struct mlx5_flow_table *ft,
+ struct mlx5_flow_spec *spec)
+{
struct rhlist_head *tmp, *list;
- struct match_list {
- struct list_head list;
- struct mlx5_flow_group *g;
- } match_list, *iter;
- LIST_HEAD(match_head);
+ struct mlx5_flow_group *g;
+ int err = 0;
rcu_read_lock();
+ INIT_LIST_HEAD(&match_head->list);
/* Collect all fgs which has a matching match_criteria */
list = rhltable_lookup(&ft->fgs_hash, spec, rhash_fg);
+ /* We are in an RCU read-side (atomic) section, so FW commands can't be issued here */
rhl_for_each_entry_rcu(g, tmp, list, hash) {
struct match_list *curr_match;
- if (likely(list_empty(&match_head))) {
- match_list.g = g;
- list_add_tail(&match_list.list, &match_head);
+ if (likely(list_empty(&match_head->list))) {
+ if (!tree_get_node(&g->node))
+ continue;
+ match_head->first.g = g;
+ list_add_tail(&match_head->first.list,
+ &match_head->list);
continue;
}
- curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
+ curr_match = kmalloc(sizeof(*curr_match), GFP_ATOMIC);
if (!curr_match) {
- rcu_read_unlock();
- rule = ERR_PTR(-ENOMEM);
- goto free_list;
+ free_match_list(match_head);
+ err = -ENOMEM;
+ goto out;
+ }
+ if (!tree_get_node(&g->node)) {
+ kfree(curr_match);
+ continue;
}
curr_match->g = g;
- list_add_tail(&curr_match->list, &match_head);
+ list_add_tail(&curr_match->list, &match_head->list);
}
+out:
rcu_read_unlock();
+ return err;
+}
+
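+/* Sum the version counters of the matched flow groups, used to detect concurrent changes to these groups. */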
+static u64 matched_fgs_get_version(struct list_head *match_head)
+{
+ struct match_list *iter;
+ u64 version = 0;
+
+ list_for_each_entry(iter, match_head, list)
+ version += (u64)atomic_read(&iter->g->node.version);
+ return version;
+}
+static struct mlx5_flow_handle *
+try_add_to_existing_fg(struct mlx5_flow_table *ft,
+ struct list_head *match_head,
+ struct mlx5_flow_spec *spec,
+ struct mlx5_flow_act *flow_act,
+ struct mlx5_flow_destination *dest,
+ int dest_num,
+ int ft_version)
+{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
+ struct mlx5_flow_group *g;
+ struct mlx5_flow_handle *rule;
+ struct match_list *iter;
+ bool take_write = false;
+ struct fs_fte *fte;
+ u64 version;
+ int err;
+
+ fte = alloc_fte(ft, spec->match_value, flow_act);
+ if (IS_ERR(fte))
+ return ERR_PTR(-ENOMEM);
+
+ list_for_each_entry(iter, match_head, list) {
+ nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
+ ida_pre_get(&iter->g->fte_allocator, GFP_KERNEL);
+ }
+
+search_again_locked:
+ version = matched_fgs_get_version(match_head);
/* Try to find a fg that already contains a matching fte */
- list_for_each_entry(iter, &match_head, list) {
- struct fs_fte *fte;
+ list_for_each_entry(iter, match_head, list) {
+ struct fs_fte *fte_tmp;
g = iter->g;
- nested_lock_ref_node(&g->node, FS_MUTEX_PARENT);
- fte = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
- rhash_fte);
- if (fte) {
- rule = add_rule_fg(g, spec->match_value,
- flow_act, dest, dest_num, fte);
- unlock_ref_node(&g->node);
- goto free_list;
+ fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
+ rhash_fte);
+ if (!fte_tmp || !tree_get_node(&fte_tmp->node))
+ continue;
+
+ nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+ if (!take_write) {
+ list_for_each_entry(iter, match_head, list)
+ up_read_ref_node(&iter->g->node);
+ } else {
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
}
- unlock_ref_node(&g->node);
+
+ rule = add_rule_fg(g, spec->match_value,
+ flow_act, dest, dest_num, fte_tmp);
+ up_write_ref_node(&fte_tmp->node);
+ tree_put_node(&fte_tmp->node);
+ kmem_cache_free(steering->ftes_cache, fte);
+ return rule;
}
/* No group with matching fte found. Try to add a new fte to any
* matching fg.
*/
- list_for_each_entry(iter, &match_head, list) {
- g = iter->g;
- nested_lock_ref_node(&g->node, FS_MUTEX_PARENT);
- rule = add_rule_fg(g, spec->match_value,
- flow_act, dest, dest_num, NULL);
- if (!IS_ERR(rule) || PTR_ERR(rule) != -ENOSPC) {
- unlock_ref_node(&g->node);
- goto free_list;
- }
- unlock_ref_node(&g->node);
+ if (!take_write) {
+ list_for_each_entry(iter, match_head, list)
+ up_read_ref_node(&iter->g->node);
+ list_for_each_entry(iter, match_head, list)
+ nested_down_write_ref_node(&iter->g->node,
+ FS_LOCK_PARENT);
+ take_write = true;
}
-free_list:
- if (!list_empty(&match_head)) {
- struct match_list *match_tmp;
+ /* Check the ft version, in case a new flow group
+ * was added while the fgs weren't locked
+ */
+ if (atomic_read(&ft->node.version) != ft_version) {
+ rule = ERR_PTR(-EAGAIN);
+ goto out;
+ }
- /* The most common case is having one FG. Since we want to
- * optimize this case, we save the first on the stack.
- * Therefore, no need to free it.
- */
- list_del(&list_first_entry(&match_head, typeof(*iter), list)->list);
- list_for_each_entry_safe(iter, match_tmp, &match_head, list) {
- list_del(&iter->list);
- kfree(iter);
+ /* Check the fgs version, in case an FTE with the
+ * same values was added while the fgs weren't locked
+ */
+ if (version != matched_fgs_get_version(match_head))
+ goto search_again_locked;
+
+ list_for_each_entry(iter, match_head, list) {
+ g = iter->g;
+
+ if (!g->node.active)
+ continue;
+ err = insert_fte(g, fte);
+ if (err) {
+ if (err == -ENOSPC)
+ continue;
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
+ kmem_cache_free(steering->ftes_cache, fte);
+ return ERR_PTR(err);
}
- }
+ nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
+ rule = add_rule_fg(g, spec->match_value,
+ flow_act, dest, dest_num, fte);
+ up_write_ref_node(&fte->node);
+ tree_put_node(&fte->node);
+ return rule;
+ }
+ rule = ERR_PTR(-ENOENT);
+out:
+ list_for_each_entry(iter, match_head, list)
+ up_write_ref_node(&iter->g->node);
+ kmem_cache_free(steering->ftes_cache, fte);
return rule;
}
@@ -1539,8 +1720,14 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
int dest_num)
{
+ struct mlx5_flow_steering *steering = get_steering(&ft->node);
struct mlx5_flow_group *g;
struct mlx5_flow_handle *rule;
+ struct match_list_head match_head;
+ bool take_write = false;
+ struct fs_fte *fte;
+ int version;
+ int err;
int i;
if (!check_valid_spec(spec))
@@ -1550,33 +1737,73 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (!dest_is_valid(&dest[i], flow_act->action, ft))
return ERR_PTR(-EINVAL);
}
+ nested_down_read_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
+search_again_locked:
+ version = atomic_read(&ft->node.version);
+
+ /* Collect all fgs which have a matching match_criteria */
+ err = build_match_list(&match_head, ft, spec);
+ if (err)
+ return ERR_PTR(err);
- nested_lock_ref_node(&ft->node, FS_MUTEX_GRANDPARENT);
- rule = try_add_to_existing_fg(ft, spec, flow_act, dest, dest_num);
- if (!IS_ERR(rule))
- goto unlock;
+ if (!take_write)
+ up_read_ref_node(&ft->node);
- g = create_autogroup(ft, spec->match_criteria_enable,
- spec->match_criteria);
+ rule = try_add_to_existing_fg(ft, &match_head.list, spec, flow_act, dest,
+ dest_num, version);
+ free_match_list(&match_head);
+ if (!IS_ERR(rule) ||
+ (PTR_ERR(rule) != -ENOENT && PTR_ERR(rule) != -EAGAIN))
+ return rule;
+
+ if (!take_write) {
+ nested_down_write_ref_node(&ft->node, FS_LOCK_GRANDPARENT);
+ take_write = true;
+ }
+
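+ /* A concurrent add may have created a matching flow group or FTE; retry the search while holding the table write lock. */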
+ if (PTR_ERR(rule) == -EAGAIN ||
+ version != atomic_read(&ft->node.version))
+ goto search_again_locked;
+
+ g = alloc_auto_flow_group(ft, spec);
if (IS_ERR(g)) {
rule = (void *)g;
- goto unlock;
+ up_write_ref_node(&ft->node);
+ return rule;
}
- rule = add_rule_fg(g, spec->match_value, flow_act, dest,
- dest_num, NULL);
- if (IS_ERR(rule)) {
- /* Remove assumes refcount > 0 and autogroup creates a group
- * with a refcount = 0.
- */
- unlock_ref_node(&ft->node);
- tree_get_node(&g->node);
- tree_remove_node(&g->node);
- return rule;
+ nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+ up_write_ref_node(&ft->node);
+
+ err = create_auto_flow_group(ft, g);
+ if (err)
+ goto err_release_fg;
+
+ fte = alloc_fte(ft, spec->match_value, flow_act);
+ if (IS_ERR(fte)) {
+ err = PTR_ERR(fte);
+ goto err_release_fg;
}
-unlock:
- unlock_ref_node(&ft->node);
+
+ err = insert_fte(g, fte);
+ if (err) {
+ kmem_cache_free(steering->ftes_cache, fte);
+ goto err_release_fg;
+ }
+
+ nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
+ up_write_ref_node(&g->node);
+ rule = add_rule_fg(g, spec->match_value, flow_act, dest,
+ dest_num, fte);
+ up_write_ref_node(&fte->node);
+ tree_put_node(&fte->node);
+ tree_put_node(&g->node);
return rule;
+
+err_release_fg:
+ up_write_ref_node(&g->node);
+ tree_put_node(&g->node);
+ return ERR_PTR(err);
}
static bool fwd_next_prio_supported(struct mlx5_flow_table *ft)
@@ -1593,7 +1820,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
int dest_num)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
- struct mlx5_flow_destination gen_dest;
+ struct mlx5_flow_destination gen_dest = {};
struct mlx5_flow_table *next_ft = NULL;
struct mlx5_flow_handle *handle = NULL;
u32 sw_action = flow_act->action;
@@ -1661,23 +1888,43 @@ static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft)
static int update_root_ft_destroy(struct mlx5_flow_table *ft)
{
struct mlx5_flow_root_namespace *root = find_root(&ft->node);
+ struct mlx5_ft_underlay_qp *uqp;
struct mlx5_flow_table *new_root_ft = NULL;
+ int err = 0;
+ u32 qpn;
if (root->root_ft != ft)
return 0;
new_root_ft = find_next_ft(ft);
- if (new_root_ft) {
- int err = mlx5_cmd_update_root_ft(root->dev, new_root_ft,
- root->underlay_qpn);
- if (err) {
- mlx5_core_warn(root->dev, "Update root flow table of id=%u failed\n",
- ft->id);
- return err;
+ if (!new_root_ft) {
+ root->root_ft = NULL;
+ return 0;
+ }
+
+ if (list_empty(&root->underlay_qpns)) {
+ /* Don't set any QPN (zero) in case QPN list is empty */
+ qpn = 0;
+ err = mlx5_cmd_update_root_ft(root->dev, new_root_ft, qpn,
+ false);
+ } else {
+ list_for_each_entry(uqp, &root->underlay_qpns, list) {
+ qpn = uqp->qpn;
+ err = mlx5_cmd_update_root_ft(root->dev, new_root_ft,
+ qpn, false);
+ if (err)
+ break;
}
}
- root->root_ft = new_root_ft;
+
+ if (err)
+ mlx5_core_warn(root->dev,
+ "Update root flow table of id(%u) qpn(%d) failed\n",
+ ft->id, qpn);
+ else
+ root->root_ft = new_root_ft;
+
return 0;
}
@@ -1817,7 +2064,7 @@ static struct fs_prio *fs_create_prio(struct mlx5_flow_namespace *ns,
return ERR_PTR(-ENOMEM);
fs_prio->node.type = FS_TYPE_PRIO;
- tree_init_node(&fs_prio->node, 1, NULL);
+ tree_init_node(&fs_prio->node, NULL, NULL);
tree_add_node(&fs_prio->node, &ns->node);
fs_prio->num_levels = num_levels;
fs_prio->prio = prio;
@@ -1843,7 +2090,7 @@ static struct mlx5_flow_namespace *fs_create_namespace(struct fs_prio *prio)
return ERR_PTR(-ENOMEM);
fs_init_namespace(ns);
- tree_init_node(&ns->node, 1, NULL);
+ tree_init_node(&ns->node, NULL, NULL);
tree_add_node(&ns->node, &prio->node);
list_add_tail(&ns->node.list, &prio->node.children);
@@ -1965,10 +2212,12 @@ static struct mlx5_flow_root_namespace *create_root_ns(struct mlx5_flow_steering
root_ns->dev = steering->dev;
root_ns->table_type = table_type;
+ INIT_LIST_HEAD(&root_ns->underlay_qpns);
+
ns = &root_ns->ns;
fs_init_namespace(ns);
mutex_init(&root_ns->chain_lock);
- tree_init_node(&ns->node, 1, NULL);
+ tree_init_node(&ns->node, NULL, NULL);
tree_add_node(&ns->node, NULL);
return root_ns;
@@ -2066,8 +2315,10 @@ static void clean_tree(struct fs_node *node)
struct fs_node *iter;
struct fs_node *temp;
+ tree_get_node(node);
list_for_each_entry_safe(iter, temp, &node->children, list)
clean_tree(iter);
+ tree_put_node(node);
tree_remove_node(node);
}
}
@@ -2091,6 +2342,8 @@ void mlx5_cleanup_fs(struct mlx5_core_dev *dev)
cleanup_root_ns(steering->sniffer_rx_root_ns);
cleanup_root_ns(steering->sniffer_tx_root_ns);
mlx5_cleanup_fc_stats(dev);
+ kmem_cache_destroy(steering->ftes_cache);
+ kmem_cache_destroy(steering->fgs_cache);
kfree(steering);
}
@@ -2196,6 +2449,16 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
steering->dev = dev;
dev->priv.steering = steering;
+ steering->fgs_cache = kmem_cache_create("mlx5_fs_fgs",
+ sizeof(struct mlx5_flow_group), 0,
+ 0, NULL);
+ steering->ftes_cache = kmem_cache_create("mlx5_fs_ftes", sizeof(struct fs_fte), 0,
+ 0, NULL);
+ if (!steering->ftes_cache || !steering->fgs_cache) {
+ err = -ENOMEM;
+ goto err;
+ }
+
if ((((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH) &&
(MLX5_CAP_GEN(dev, nic_flow_table))) ||
((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) &&
@@ -2245,17 +2508,76 @@ err:
int mlx5_fs_add_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
+ struct mlx5_ft_underlay_qp *new_uqp;
+ int err = 0;
+
+ new_uqp = kzalloc(sizeof(*new_uqp), GFP_KERNEL);
+ if (!new_uqp)
+ return -ENOMEM;
+
+ mutex_lock(&root->chain_lock);
+
+ if (!root->root_ft) {
+ err = -EINVAL;
+ goto update_ft_fail;
+ }
+
+ err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, false);
+ if (err) {
+ mlx5_core_warn(dev, "Failed adding underlay QPN (%u) to root FT err(%d)\n",
+ underlay_qpn, err);
+ goto update_ft_fail;
+ }
+
+ new_uqp->qpn = underlay_qpn;
+ list_add_tail(&new_uqp->list, &root->underlay_qpns);
+
+ mutex_unlock(&root->chain_lock);
- root->underlay_qpn = underlay_qpn;
return 0;
+
+update_ft_fail:
+ mutex_unlock(&root->chain_lock);
+ kfree(new_uqp);
+ return err;
}
EXPORT_SYMBOL(mlx5_fs_add_rx_underlay_qpn);
int mlx5_fs_remove_rx_underlay_qpn(struct mlx5_core_dev *dev, u32 underlay_qpn)
{
struct mlx5_flow_root_namespace *root = dev->priv.steering->root_ns;
+ struct mlx5_ft_underlay_qp *uqp;
+ bool found = false;
+ int err = 0;
+
+ mutex_lock(&root->chain_lock);
+ list_for_each_entry(uqp, &root->underlay_qpns, list) {
+ if (uqp->qpn == underlay_qpn) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ mlx5_core_warn(dev, "Failed finding underlay qp (%u) in qpn list\n",
+ underlay_qpn);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlx5_cmd_update_root_ft(dev, root->root_ft, underlay_qpn, true);
+ if (err)
+ mlx5_core_warn(dev, "Failed removing underlay QPN (%u) from root FT err(%d)\n",
+ underlay_qpn, err);
+
+ list_del(&uqp->list);
+ mutex_unlock(&root->chain_lock);
+ kfree(uqp);
- root->underlay_qpn = 0;
return 0;
+
+out:
+ mutex_unlock(&root->chain_lock);
+ return err;
}
EXPORT_SYMBOL(mlx5_fs_remove_rx_underlay_qpn);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
index 48dd789..397d24a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h
@@ -33,6 +33,7 @@
#ifndef _MLX5_FS_CORE_
#define _MLX5_FS_CORE_
+#include <linux/refcount.h>
#include <linux/mlx5/fs.h>
#include <linux/rhashtable.h>
@@ -66,6 +67,8 @@ enum fs_fte_status {
struct mlx5_flow_steering {
struct mlx5_core_dev *dev;
+ struct kmem_cache *fgs_cache;
+ struct kmem_cache *ftes_cache;
struct mlx5_flow_root_namespace *root_ns;
struct mlx5_flow_root_namespace *fdb_root_ns;
struct mlx5_flow_root_namespace *esw_egress_root_ns;
@@ -81,9 +84,12 @@ struct fs_node {
struct fs_node *parent;
struct fs_node *root;
/* lock the node for writing and traversing */
- struct mutex lock;
- atomic_t refcount;
- void (*remove_func)(struct fs_node *);
+ struct rw_semaphore lock;
+ refcount_t refcount;
+ bool active;
+ void (*del_hw_func)(struct fs_node *);
+ void (*del_sw_func)(struct fs_node *);
+ atomic_t version;
};
struct mlx5_flow_rule {
@@ -120,7 +126,6 @@ struct mlx5_flow_table {
/* FWD rules that point on this flow table */
struct list_head fwd_rules;
u32 flags;
- struct ida fte_allocator;
struct rhltable fgs_hash;
};
@@ -147,6 +152,11 @@ struct mlx5_fc {
struct mlx5_fc_cache cache ____cacheline_aligned_in_smp;
};
+struct mlx5_ft_underlay_qp {
+ struct list_head list;
+ u32 qpn;
+};
+
#define MLX5_FTE_MATCH_PARAM_RESERVED reserved_at_600
/* Calculate the fte_match_param length and without the reserved length.
* Make sure the reserved field is the last.
@@ -200,6 +210,7 @@ struct mlx5_flow_group {
struct mlx5_flow_group_mask mask;
u32 start_index;
u32 max_ftes;
+ struct ida fte_allocator;
u32 id;
struct rhashtable ftes_hash;
struct rhlist_head hash;
@@ -212,7 +223,7 @@ struct mlx5_flow_root_namespace {
struct mlx5_flow_table *root_ft;
/* Should be held when chaining flow tables */
struct mutex chain_lock;
- u32 underlay_qpn;
+ struct list_head underlay_qpns;
};
int mlx5_init_fc_stats(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 2c71557..5ef1b56b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -106,6 +106,13 @@ static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
MLX5_MCAM_REGS_FIRST_128);
}
+static int mlx5_get_qcam_reg(struct mlx5_core_dev *dev)
+{
+ return mlx5_query_qcam_reg(dev, dev->caps.qcam,
+ MLX5_QCAM_FEATURE_ENHANCED_FEATURES,
+ MLX5_QCAM_REGS_FIRST_128);
+}
+
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
int err;
@@ -182,6 +189,9 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
if (MLX5_CAP_GEN(dev, mcam_reg))
mlx5_get_mcam_reg(dev);
+ if (MLX5_CAP_GEN(dev, qcam_reg))
+ mlx5_get_qcam_reg(dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index db86e15..1a0e797a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -285,9 +285,9 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev)
spin_unlock_irqrestore(&health->wq_lock, flags);
}
-static void poll_health(unsigned long data)
+static void poll_health(struct timer_list *t)
{
- struct mlx5_core_dev *dev = (struct mlx5_core_dev *)data;
+ struct mlx5_core_dev *dev = from_timer(dev, t, priv.health.timer);
struct mlx5_core_health *health = &dev->priv.health;
u32 count;
@@ -320,15 +320,13 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
- init_timer(&health->timer);
+ timer_setup(&health->timer, poll_health, 0);
health->sick = 0;
clear_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
clear_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
health->health = &dev->iseg->health;
health->health_counter = &dev->iseg->health_counter;
- health->timer.data = (unsigned long)dev;
- health->timer.function = poll_health;
health->timer.expires = round_jiffies(jiffies + MLX5_HEALTH_POLL_INTERVAL);
add_timer(&health->timer);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
index 43c126c..6f338a9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c
@@ -250,3 +250,8 @@ const struct ethtool_ops mlx5i_ethtool_ops = {
.get_link_ksettings = mlx5i_get_link_ksettings,
.get_link = ethtool_op_get_link,
};
+
+const struct ethtool_ops mlx5i_pkey_ethtool_ops = {
+ .get_drvinfo = mlx5i_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
index 145e392..d2a66dc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
@@ -40,8 +40,6 @@
static int mlx5i_open(struct net_device *netdev);
static int mlx5i_close(struct net_device *netdev);
-static int mlx5i_dev_init(struct net_device *dev);
-static void mlx5i_dev_cleanup(struct net_device *dev);
static int mlx5i_change_mtu(struct net_device *netdev, int new_mtu);
static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
@@ -70,10 +68,10 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev,
}
/* Called directly after IPoIB netdevice was created to initialize SW structs */
-static void mlx5i_init(struct mlx5_core_dev *mdev,
- struct net_device *netdev,
- const struct mlx5e_profile *profile,
- void *ppriv)
+void mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
{
struct mlx5e_priv *priv = mlx5i_epriv(netdev);
@@ -108,11 +106,69 @@ static void mlx5i_cleanup(struct mlx5e_priv *priv)
/* Do nothing .. */
}
+int mlx5i_init_underlay_qp(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ struct mlx5_core_qp *qp = &ipriv->qp;
+ struct mlx5_qp_context *context;
+ int ret;
+
+ /* QP states */
+ /* Walk the underlay QP through RST -> INIT -> RTR -> RTS */
+ if (!context)
+ return -ENOMEM;
+
+ context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
+ context->pri_path.port = 1;
+ context->pri_path.pkey_index = cpu_to_be16(ipriv->pkey_index);
+ context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
+ goto err_qp_modify_to_err;
+ }
+ memset(context, 0, sizeof(*context));
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
+ goto err_qp_modify_to_err;
+ }
+
+ ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
+ if (ret) {
+ mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
+ goto err_qp_modify_to_err;
+ }
+
+ kfree(context);
+ return 0;
+
+err_qp_modify_to_err:
+ mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2ERR_QP, 0, context, qp);
+ kfree(context);
+ return ret;
+}
+
+void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_qp_context context;
+ int err;
+
+ err = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context,
+ &ipriv->qp);
+ if (err)
+ mlx5_core_err(mdev, "Failed to modify qp 2RST, err: %d\n", err);
+}
+
#define MLX5_QP_ENHANCED_ULP_STATELESS_MODE 2
-static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
- struct mlx5_qp_context *context = NULL;
u32 *in = NULL;
void *addr_path;
int ret = 0;
@@ -140,43 +196,12 @@ static int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core
goto out;
}
- /* QP states */
- context = kzalloc(sizeof(*context), GFP_KERNEL);
- if (!context) {
- ret = -ENOMEM;
- goto out;
- }
-
- context->flags = cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
- context->pri_path.port = 1;
- context->qkey = cpu_to_be32(IB_DEFAULT_Q_KEY);
-
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RST2INIT_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp RST2INIT, err: %d\n", ret);
- goto out;
- }
- memset(context, 0, sizeof(*context));
-
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_INIT2RTR_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp INIT2RTR, err: %d\n", ret);
- goto out;
- }
-
- ret = mlx5_core_qp_modify(mdev, MLX5_CMD_OP_RTR2RTS_QP, 0, context, qp);
- if (ret) {
- mlx5_core_err(mdev, "Failed to modify qp RTR2RTS, err: %d\n", ret);
- goto out;
- }
-
out:
- kfree(context);
kvfree(in);
return ret;
}
-static void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp)
{
mlx5_core_destroy_qp(mdev, qp);
}
@@ -195,10 +220,14 @@ static int mlx5i_init_tx(struct mlx5e_priv *priv)
err = mlx5e_create_tis(priv->mdev, 0 /* tc */, ipriv->qp.qpn, &priv->tisn[0]);
if (err) {
mlx5_core_warn(priv->mdev, "create tis failed, %d\n", err);
- return err;
+ goto err_destroy_underlay_qp;
}
return 0;
+
+err_destroy_underlay_qp:
+ mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+ return err;
}
static void mlx5i_cleanup_tx(struct mlx5e_priv *priv)
@@ -226,15 +255,24 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv)
priv->netdev->hw_features &= ~NETIF_F_NTUPLE;
}
+ err = mlx5e_create_inner_ttc_table(priv);
+ if (err) {
+ netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n",
+ err);
+ goto err_destroy_arfs_tables;
+ }
+
err = mlx5e_create_ttc_table(priv);
if (err) {
netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n",
err);
- goto err_destroy_arfs_tables;
+ goto err_destroy_inner_ttc_table;
}
return 0;
+err_destroy_inner_ttc_table:
+ mlx5e_destroy_inner_ttc_table(priv);
err_destroy_arfs_tables:
mlx5e_arfs_destroy_tables(priv);
@@ -244,12 +282,12 @@ err_destroy_arfs_tables:
static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv)
{
mlx5e_destroy_ttc_table(priv);
+ mlx5e_destroy_inner_ttc_table(priv);
mlx5e_arfs_destroy_tables(priv);
}
static int mlx5i_init_rx(struct mlx5e_priv *priv)
{
- struct mlx5i_priv *ipriv = priv->ppriv;
int err;
err = mlx5e_create_indirect_rqt(priv);
@@ -268,18 +306,12 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv)
if (err)
goto err_destroy_indirect_tirs;
- err = mlx5_fs_add_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
- if (err)
- goto err_destroy_direct_tirs;
-
err = mlx5i_create_flow_steering(priv);
if (err)
- goto err_remove_rx_underlay_qpn;
+ goto err_destroy_direct_tirs;
return 0;
-err_remove_rx_underlay_qpn:
- mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
err_destroy_direct_tirs:
mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
@@ -293,9 +325,6 @@ err_destroy_indirect_rqts:
static void mlx5i_cleanup_rx(struct mlx5e_priv *priv)
{
- struct mlx5i_priv *ipriv = priv->ppriv;
-
- mlx5_fs_remove_rx_underlay_qpn(priv->mdev, ipriv->qp.qpn);
mlx5i_destroy_flow_steering(priv);
mlx5e_destroy_direct_tirs(priv);
mlx5e_destroy_indirect_tirs(priv);
@@ -351,7 +380,7 @@ out:
return err;
}
-static int mlx5i_dev_init(struct net_device *dev)
+int mlx5i_dev_init(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
struct mlx5i_priv *ipriv = priv->ppriv;
@@ -361,6 +390,9 @@ static int mlx5i_dev_init(struct net_device *dev)
dev->dev_addr[2] = (ipriv->qp.qpn >> 8) & 0xff;
dev->dev_addr[3] = (ipriv->qp.qpn) & 0xff;
+ /* Add QPN to net-device mapping to HT */
+ mlx5i_pkey_add_qpn(dev, ipriv->qp.qpn);
+
return 0;
}
@@ -378,63 +410,84 @@ static int mlx5i_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
}
}
-static void mlx5i_dev_cleanup(struct net_device *dev)
+void mlx5i_dev_cleanup(struct net_device *dev)
{
struct mlx5e_priv *priv = mlx5i_epriv(dev);
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5i_priv *ipriv = priv->ppriv;
- struct mlx5_qp_context context;
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ mlx5i_uninit_underlay_qp(priv);
- /* detach qp from flow-steering by reset it */
- mlx5_core_qp_modify(mdev, MLX5_CMD_OP_2RST_QP, 0, &context, &ipriv->qp);
+ /* Delete QPN to net-device mapping from HT */
+ mlx5i_pkey_del_qpn(dev, ipriv->qp.qpn);
}
static int mlx5i_open(struct net_device *netdev)
{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ struct mlx5_core_dev *mdev = epriv->mdev;
int err;
- mutex_lock(&priv->state_lock);
+ mutex_lock(&epriv->state_lock);
- set_bit(MLX5E_STATE_OPENED, &priv->state);
+ set_bit(MLX5E_STATE_OPENED, &epriv->state);
- err = mlx5e_open_channels(priv, &priv->channels);
- if (err)
+ err = mlx5i_init_underlay_qp(epriv);
+ if (err) {
+ mlx5_core_warn(mdev, "prepare underlay qp state failed, %d\n", err);
goto err_clear_state_opened_flag;
+ }
+
+ err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ if (err) {
+ mlx5_core_warn(mdev, "attach underlay qp to ft failed, %d\n", err);
+ goto err_reset_qp;
+ }
- mlx5e_refresh_tirs(priv, false);
- mlx5e_activate_priv_channels(priv);
- mlx5e_timestamp_init(priv);
+ err = mlx5e_open_channels(epriv, &epriv->channels);
+ if (err)
+ goto err_remove_fs_underlay_qp;
- mutex_unlock(&priv->state_lock);
+ mlx5e_refresh_tirs(epriv, false);
+ mlx5e_activate_priv_channels(epriv);
+ mlx5e_timestamp_set(epriv);
+
+ mutex_unlock(&epriv->state_lock);
return 0;
+err_remove_fs_underlay_qp:
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+err_reset_qp:
+ mlx5i_uninit_underlay_qp(epriv);
err_clear_state_opened_flag:
- clear_bit(MLX5E_STATE_OPENED, &priv->state);
- mutex_unlock(&priv->state_lock);
+ clear_bit(MLX5E_STATE_OPENED, &epriv->state);
+ mutex_unlock(&epriv->state_lock);
return err;
}
static int mlx5i_close(struct net_device *netdev)
{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ struct mlx5_core_dev *mdev = epriv->mdev;
/* May already be CLOSED in case a previous configuration operation
* (e.g RX/TX queue size change) that involves close&open failed.
*/
- mutex_lock(&priv->state_lock);
+ mutex_lock(&epriv->state_lock);
- if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ if (!test_bit(MLX5E_STATE_OPENED, &epriv->state))
goto unlock;
- clear_bit(MLX5E_STATE_OPENED, &priv->state);
+ clear_bit(MLX5E_STATE_OPENED, &epriv->state);
- mlx5e_timestamp_cleanup(priv);
- netif_carrier_off(priv->netdev);
- mlx5e_deactivate_priv_channels(priv);
- mlx5e_close_channels(&priv->channels);
+ netif_carrier_off(epriv->netdev);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5i_uninit_underlay_qp(epriv);
+ mlx5e_deactivate_priv_channels(epriv);
+ mlx5e_close_channels(&epriv->channels);
unlock:
- mutex_unlock(&priv->state_lock);
+ mutex_unlock(&epriv->state_lock);
return 0;
}
@@ -492,6 +545,13 @@ static int mlx5i_xmit(struct net_device *dev, struct sk_buff *skb,
return mlx5i_sq_xmit(sq, skb, &mah->av, dqpn, ipriv->qkey);
}
+static void mlx5i_set_pkey_index(struct net_device *netdev, int id)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+
+ ipriv->pkey_index = (u16)id;
+}
+
static int mlx5i_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_IB)
@@ -510,12 +570,13 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
const char *name,
void (*setup)(struct net_device *))
{
- const struct mlx5e_profile *profile = &mlx5i_nic_profile;
- int nch = profile->max_nch(mdev);
+ const struct mlx5e_profile *profile;
struct net_device *netdev;
struct mlx5i_priv *ipriv;
struct mlx5e_priv *epriv;
struct rdma_netdev *rn;
+ bool sub_interface;
+ int nch;
int err;
if (mlx5i_check_required_hca_cap(mdev)) {
@@ -523,10 +584,15 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
return ERR_PTR(-EOPNOTSUPP);
}
- /* This function should only be called once per mdev */
- err = mlx5e_create_mdev_resources(mdev);
- if (err)
- return NULL;
+ /* TODO: Need a better way to check whether this is a child device */
+ sub_interface = (mdev->mlx5e_res.pdn != 0);
+
+ if (sub_interface)
+ profile = mlx5i_pkey_get_profile();
+ else
+ profile = &mlx5i_nic_profile;
+
+ nch = profile->max_nch(mdev);
netdev = alloc_netdev_mqs(sizeof(struct mlx5i_priv) + sizeof(struct mlx5e_priv),
name, NET_NAME_UNKNOWN,
@@ -535,7 +601,7 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
nch);
if (!netdev) {
mlx5_core_warn(mdev, "alloc_netdev_mqs failed\n");
- goto free_mdev_resources;
+ return NULL;
}
ipriv = netdev_priv(netdev);
@@ -545,6 +611,20 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
if (!epriv->wq)
goto err_free_netdev;
+ ipriv->sub_interface = sub_interface;
+ if (!ipriv->sub_interface) {
+ err = mlx5i_pkey_qpn_ht_init(netdev);
+ if (err) {
+ mlx5_core_warn(mdev, "allocate qpn_to_netdev ht failed\n");
+ goto destroy_wq;
+ }
+
+ /* This should only be called once per mdev */
+ err = mlx5e_create_mdev_resources(mdev);
+ if (err)
+ goto destroy_ht;
+ }
+
profile->init(mdev, netdev, profile, ipriv);
mlx5e_attach_netdev(epriv);
@@ -556,13 +636,16 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
rn->send = mlx5i_xmit;
rn->attach_mcast = mlx5i_attach_mcast;
rn->detach_mcast = mlx5i_detach_mcast;
+ rn->set_id = mlx5i_set_pkey_index;
return netdev;
+destroy_ht:
+ mlx5i_pkey_qpn_ht_cleanup(netdev);
+destroy_wq:
+ destroy_workqueue(epriv->wq);
err_free_netdev:
free_netdev(netdev);
-free_mdev_resources:
- mlx5e_destroy_mdev_resources(mdev);
return NULL;
}
@@ -570,15 +653,18 @@ EXPORT_SYMBOL(mlx5_rdma_netdev_alloc);
void mlx5_rdma_netdev_free(struct net_device *netdev)
{
- struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = priv->ppriv;
const struct mlx5e_profile *profile = priv->profile;
- struct mlx5_core_dev *mdev = priv->mdev;
mlx5e_detach_netdev(priv);
profile->cleanup(priv);
destroy_workqueue(priv->wq);
- free_netdev(netdev);
- mlx5e_destroy_mdev_resources(mdev);
+ if (!ipriv->sub_interface) {
+ mlx5i_pkey_qpn_ht_cleanup(netdev);
+ mlx5e_destroy_mdev_resources(priv->mdev);
+ }
+ free_netdev(netdev);
}
EXPORT_SYMBOL(mlx5_rdma_netdev_free);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
index a0f405f..4900802 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.h
@@ -39,6 +39,7 @@
#define MLX5I_MAX_NUM_TC 1
extern const struct ethtool_ops mlx5i_ethtool_ops;
+extern const struct ethtool_ops mlx5i_pkey_ethtool_ops;
#define MLX5_IB_GRH_BYTES 40
#define MLX5_IPOIB_ENCAP_LEN 4
@@ -49,10 +50,45 @@ extern const struct ethtool_ops mlx5i_ethtool_ops;
struct mlx5i_priv {
struct rdma_netdev rn; /* keep this first */
struct mlx5_core_qp qp;
+ bool sub_interface;
u32 qkey;
+ u16 pkey_index;
+ struct mlx5i_pkey_qpn_ht *qpn_htbl;
char *mlx5e_priv[0];
};
+/* Underlay QP create/destroy functions */
+int mlx5i_create_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
+void mlx5i_destroy_underlay_qp(struct mlx5_core_dev *mdev, struct mlx5_core_qp *qp);
+
+/* Underlay QP state modification init/uninit functions */
+int mlx5i_init_underlay_qp(struct mlx5e_priv *priv);
+void mlx5i_uninit_underlay_qp(struct mlx5e_priv *priv);
+
+/* Allocate/Free underlay QPN to net-device hash table */
+int mlx5i_pkey_qpn_ht_init(struct net_device *netdev);
+void mlx5i_pkey_qpn_ht_cleanup(struct net_device *netdev);
+
+/* Add/Remove an underlay QPN to net-device mapping to/from the hash table */
+int mlx5i_pkey_add_qpn(struct net_device *netdev, u32 qpn);
+int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn);
+
+/* Get the net-device corresponding to the given underlay QPN */
+struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn);
+
+/* Shared ndo functions */
+int mlx5i_dev_init(struct net_device *dev);
+void mlx5i_dev_cleanup(struct net_device *dev);
+
+/* Parent profile functions */
+void mlx5i_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv);
+
+/* Get child interface nic profile */
+const struct mlx5e_profile *mlx5i_pkey_get_profile(void);
+
/* Extract mlx5e_priv from IPoIB netdev */
#define mlx5i_epriv(netdev) ((void *)(((struct mlx5i_priv *)netdev_priv(netdev))->mlx5e_priv))
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
new file mode 100644
index 0000000..531b02c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c
@@ -0,0 +1,350 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/hash.h>
+#include "ipoib.h"
+
+#define MLX5I_MAX_LOG_PKEY_SUP 7
+
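+/* Hash table node mapping an underlay QPN to the net-device that owns it. */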
+struct qpn_to_netdev {
+ struct net_device *netdev;
+ struct hlist_node hlist;
+ u32 underlay_qpn;
+};
+
+struct mlx5i_pkey_qpn_ht {
+ struct hlist_head buckets[1 << MLX5I_MAX_LOG_PKEY_SUP];
+ spinlock_t ht_lock; /* Synchronise with NAPI */
+};
+
+int mlx5i_pkey_qpn_ht_init(struct net_device *netdev)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+ struct mlx5i_pkey_qpn_ht *qpn_htbl;
+
+ qpn_htbl = kzalloc(sizeof(*qpn_htbl), GFP_KERNEL);
+ if (!qpn_htbl)
+ return -ENOMEM;
+
+ ipriv->qpn_htbl = qpn_htbl;
+ spin_lock_init(&qpn_htbl->ht_lock);
+
+ return 0;
+}
+
+void mlx5i_pkey_qpn_ht_cleanup(struct net_device *netdev)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+
+ kfree(ipriv->qpn_htbl);
+}
+
+static struct qpn_to_netdev *mlx5i_find_qpn_to_netdev_node(struct hlist_head *buckets,
+ u32 qpn)
+{
+ struct hlist_head *h = &buckets[hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP)];
+ struct qpn_to_netdev *node;
+
+ hlist_for_each_entry(node, h, hlist) {
+ if (node->underlay_qpn == qpn)
+ return node;
+ }
+
+ return NULL;
+}
+
+int mlx5i_pkey_add_qpn(struct net_device *netdev, u32 qpn)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+ struct mlx5i_pkey_qpn_ht *ht = ipriv->qpn_htbl;
+ u8 key = hash_32(qpn, MLX5I_MAX_LOG_PKEY_SUP);
+ struct qpn_to_netdev *new_node;
+
+ new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
+ if (!new_node)
+ return -ENOMEM;
+
+ new_node->netdev = netdev;
+ new_node->underlay_qpn = qpn;
+ spin_lock_bh(&ht->ht_lock);
+ hlist_add_head(&new_node->hlist, &ht->buckets[key]);
+ spin_unlock_bh(&ht->ht_lock);
+
+ return 0;
+}
+
+int mlx5i_pkey_del_qpn(struct net_device *netdev, u32 qpn)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ struct mlx5i_pkey_qpn_ht *ht = ipriv->qpn_htbl;
+ struct qpn_to_netdev *node;
+
+ node = mlx5i_find_qpn_to_netdev_node(ht->buckets, qpn);
+ if (!node) {
+ mlx5_core_warn(epriv->mdev, "QPN to netdev delete from HT failed\n");
+ return -EINVAL;
+ }
+
+ spin_lock_bh(&ht->ht_lock);
+ hlist_del_init(&node->hlist);
+ spin_unlock_bh(&ht->ht_lock);
+ kfree(node);
+
+ return 0;
+}
+
+struct net_device *mlx5i_pkey_get_netdev(struct net_device *netdev, u32 qpn)
+{
+ struct mlx5i_priv *ipriv = netdev_priv(netdev);
+ struct qpn_to_netdev *node;
+
+ node = mlx5i_find_qpn_to_netdev_node(ipriv->qpn_htbl->buckets, qpn);
+ if (!node)
+ return NULL;
+
+ return node->netdev;
+}
+
+static int mlx5i_pkey_open(struct net_device *netdev);
+static int mlx5i_pkey_close(struct net_device *netdev);
+static int mlx5i_pkey_dev_init(struct net_device *dev);
+static void mlx5i_pkey_dev_cleanup(struct net_device *netdev);
+static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu);
+
+static const struct net_device_ops mlx5i_pkey_netdev_ops = {
+ .ndo_open = mlx5i_pkey_open,
+ .ndo_stop = mlx5i_pkey_close,
+ .ndo_init = mlx5i_pkey_dev_init,
+ .ndo_uninit = mlx5i_pkey_dev_cleanup,
+ .ndo_change_mtu = mlx5i_pkey_change_mtu,
+};
+
+/* Child NDOs */
+static int mlx5i_pkey_dev_init(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(dev);
+ struct mlx5i_priv *ipriv, *parent_ipriv;
+ struct net_device *parent_dev;
+ int parent_ifindex;
+
+ ipriv = priv->ppriv;
+
+ /* Get QPN to netdevice hash table from parent */
+ parent_ifindex = dev->netdev_ops->ndo_get_iflink(dev);
+ parent_dev = dev_get_by_index(dev_net(dev), parent_ifindex);
+ if (!parent_dev) {
+ mlx5_core_warn(priv->mdev, "failed to get parent device\n");
+ return -EINVAL;
+ }
+
+ parent_ipriv = netdev_priv(parent_dev);
+ ipriv->qpn_htbl = parent_ipriv->qpn_htbl;
+ dev_put(parent_dev);
+
+ return mlx5i_dev_init(dev);
+}
+
+static void mlx5i_pkey_dev_cleanup(struct net_device *netdev)
+{
+ return mlx5i_dev_cleanup(netdev);
+}
+
+static int mlx5i_pkey_open(struct net_device *netdev)
+{
+ struct mlx5e_priv *epriv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = epriv->ppriv;
+ struct mlx5_core_dev *mdev = epriv->mdev;
+ int err;
+
+ mutex_lock(&epriv->state_lock);
+
+ set_bit(MLX5E_STATE_OPENED, &epriv->state);
+
+ err = mlx5i_init_underlay_qp(epriv);
+ if (err) {
+ mlx5_core_warn(mdev, "prepare child underlay qp state failed, %d\n", err);
+ goto err_release_lock;
+ }
+
+ err = mlx5_fs_add_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ if (err) {
+ mlx5_core_warn(mdev, "attach child underlay qp to ft failed, %d\n", err);
+ goto err_uninit_underlay_qp;
+ }
+
+ err = mlx5e_create_tis(mdev, 0 /* tc */, ipriv->qp.qpn, &epriv->tisn[0]);
+ if (err) {
+ mlx5_core_warn(mdev, "create child tis failed, %d\n", err);
+ goto err_remove_rx_underlay_qp;
+ }
+
+ err = mlx5e_open_channels(epriv, &epriv->channels);
+ if (err) {
+ mlx5_core_warn(mdev, "opening child channels failed, %d\n", err);
+ goto err_clear_state_opened_flag;
+ }
+ mlx5e_refresh_tirs(epriv, false);
+ mlx5e_activate_priv_channels(epriv);
+ mutex_unlock(&epriv->state_lock);
+
+ return 0;
+
+err_clear_state_opened_flag:
+ mlx5e_destroy_tis(mdev, epriv->tisn[0]);
+err_remove_rx_underlay_qp:
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+err_uninit_underlay_qp:
+ mlx5i_uninit_underlay_qp(epriv);
+err_release_lock:
+ clear_bit(MLX5E_STATE_OPENED, &epriv->state);
+ mutex_unlock(&epriv->state_lock);
+ return err;
+}
+
+static int mlx5i_pkey_close(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mutex_lock(&priv->state_lock);
+
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ goto unlock;
+
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ netif_carrier_off(priv->netdev);
+ mlx5_fs_remove_rx_underlay_qpn(mdev, ipriv->qp.qpn);
+ mlx5i_uninit_underlay_qp(priv);
+ mlx5e_deactivate_priv_channels(priv);
+ mlx5e_close_channels(&priv->channels);
+ mlx5e_destroy_tis(mdev, priv->tisn[0]);
+unlock:
+ mutex_unlock(&priv->state_lock);
+ return 0;
+}
+
+static int mlx5i_pkey_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ mutex_lock(&priv->state_lock);
+ netdev->mtu = new_mtu;
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+/* Called directly after IPoIB netdevice was created to initialize SW structs */
+static void mlx5i_pkey_init(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ const struct mlx5e_profile *profile,
+ void *ppriv)
+{
+ struct mlx5e_priv *priv = mlx5i_epriv(netdev);
+
+ mlx5i_init(mdev, netdev, profile, ppriv);
+
+ /* Override parent ndo */
+ netdev->netdev_ops = &mlx5i_pkey_netdev_ops;
+
+ /* Set child limited ethtool support */
+ netdev->ethtool_ops = &mlx5i_pkey_ethtool_ops;
+
+ /* Use dummy rqs */
+ priv->channels.params.log_rq_size = MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE;
+}
+
+/* Called directly before IPoIB netdevice is destroyed to cleanup SW structs */
+static void mlx5i_pkey_cleanup(struct mlx5e_priv *priv)
+{
+ /* Do nothing .. */
+}
+
+static int mlx5i_pkey_init_tx(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+ int err;
+
+ err = mlx5i_create_underlay_qp(priv->mdev, &ipriv->qp);
+ if (err) {
+ mlx5_core_warn(priv->mdev, "create child underlay QP failed, %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+static void mlx5i_pkey_cleanup_tx(struct mlx5e_priv *priv)
+{
+ struct mlx5i_priv *ipriv = priv->ppriv;
+
+ mlx5i_destroy_underlay_qp(priv->mdev, &ipriv->qp);
+}
+
+static int mlx5i_pkey_init_rx(struct mlx5e_priv *priv)
+{
+ /* Since the RX resources are shared between child and parent, the
+ * parent interface takes care of RX resource allocation and init.
+ */
+ return 0;
+}
+
+static void mlx5i_pkey_cleanup_rx(struct mlx5e_priv *priv)
+{
+ /* Since the RX resources are shared between child and parent, the
+ * parent interface takes care of RX resource freeing and de-init.
+ */
+}
+
+static const struct mlx5e_profile mlx5i_pkey_nic_profile = {
+ .init = mlx5i_pkey_init,
+ .cleanup = mlx5i_pkey_cleanup,
+ .init_tx = mlx5i_pkey_init_tx,
+ .cleanup_tx = mlx5i_pkey_cleanup_tx,
+ .init_rx = mlx5i_pkey_init_rx,
+ .cleanup_rx = mlx5i_pkey_cleanup_rx,
+ .enable = NULL,
+ .disable = NULL,
+ .update_stats = NULL,
+ .max_nch = mlx5e_get_max_num_channels,
+ .rx_handlers.handle_rx_cqe = mlx5i_handle_rx_cqe,
+ .rx_handlers.handle_rx_cqe_mpwqe = NULL, /* Not supported */
+ .max_tc = MLX5I_MAX_NUM_TC,
+};
+
+const struct mlx5e_profile *mlx5i_pkey_get_profile(void)
+{
+ return &mlx5i_pkey_nic_profile;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
new file mode 100644
index 0000000..fa8aed6
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -0,0 +1,525 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/clocksource.h>
+#include "en.h"
+
+enum {
+ MLX5_CYCLES_SHIFT = 23
+};
+
+enum {
+ MLX5_PIN_MODE_IN = 0x0,
+ MLX5_PIN_MODE_OUT = 0x1,
+};
+
+enum {
+ MLX5_OUT_PATTERN_PULSE = 0x0,
+ MLX5_OUT_PATTERN_PERIODIC = 0x1,
+};
+
+enum {
+ MLX5_EVENT_MODE_DISABLE = 0x0,
+ MLX5_EVENT_MODE_REPETITIVE = 0x1,
+ MLX5_EVENT_MODE_ONCE_TILL_ARM = 0x2,
+};
+
+enum {
+ MLX5_MTPPS_FS_ENABLE = BIT(0x0),
+ MLX5_MTPPS_FS_PATTERN = BIT(0x2),
+ MLX5_MTPPS_FS_PIN_MODE = BIT(0x3),
+ MLX5_MTPPS_FS_TIME_STAMP = BIT(0x4),
+ MLX5_MTPPS_FS_OUT_PULSE_DURATION = BIT(0x5),
+ MLX5_MTPPS_FS_ENH_OUT_PER_ADJ = BIT(0x7),
+};
+
+static u64 read_internal_timer(const struct cyclecounter *cc)
+{
+ struct mlx5_clock *clock = container_of(cc, struct mlx5_clock, cycles);
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
+ clock);
+
+ return mlx5_read_internal_timer(mdev) & cc->mask;
+}
+
+static void mlx5_pps_out(struct work_struct *work)
+{
+ struct mlx5_pps *pps_info = container_of(work, struct mlx5_pps,
+ out_work);
+ struct mlx5_clock *clock = container_of(pps_info, struct mlx5_clock,
+ pps_info);
+ struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev,
+ clock);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ unsigned long flags;
+ int i;
+
+ for (i = 0; i < clock->ptp_info.n_pins; i++) {
+ u64 tstart;
+
+ write_lock_irqsave(&clock->lock, flags);
+ tstart = clock->pps_info.start[i];
+ clock->pps_info.start[i] = 0;
+ write_unlock_irqrestore(&clock->lock, flags);
+ if (!tstart)
+ continue;
+
+ MLX5_SET(mtpps_reg, in, pin, i);
+ MLX5_SET64(mtpps_reg, in, time_stamp, tstart);
+ MLX5_SET(mtpps_reg, in, field_select, MLX5_MTPPS_FS_TIME_STAMP);
+ mlx5_set_mtpps(mdev, in, sizeof(in));
+ }
+}
+
+static void mlx5_timestamp_overflow(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlx5_clock *clock = container_of(dwork, struct mlx5_clock,
+ overflow_work);
+ unsigned long flags;
+
+ write_lock_irqsave(&clock->lock, flags);
+ timecounter_read(&clock->tc);
+ write_unlock_irqrestore(&clock->lock, flags);
+ schedule_delayed_work(&clock->overflow_work, clock->overflow_period);
+}
+
+static int mlx5_ptp_settime(struct ptp_clock_info *ptp,
+ const struct timespec64 *ts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+ u64 ns = timespec64_to_ns(ts);
+ unsigned long flags;
+
+ write_lock_irqsave(&clock->lock, flags);
+ timecounter_init(&clock->tc, &clock->cycles, ns);
+ write_unlock_irqrestore(&clock->lock, flags);
+
+ return 0;
+}
+
+static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+ u64 ns;
+ unsigned long flags;
+
+ write_lock_irqsave(&clock->lock, flags);
+ ns = timecounter_read(&clock->tc);
+ write_unlock_irqrestore(&clock->lock, flags);
+
+ *ts = ns_to_timespec64(ns);
+
+ return 0;
+}
+
+static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+ unsigned long flags;
+
+ write_lock_irqsave(&clock->lock, flags);
+ timecounter_adjtime(&clock->tc, delta);
+ write_unlock_irqrestore(&clock->lock, flags);
+
+ return 0;
+}
+
+static int mlx5_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
+{
+ u64 adj;
+ u32 diff;
+ unsigned long flags;
+ int neg_adj = 0;
+ struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock,
+ ptp_info);
+
+ if (delta < 0) {
+ neg_adj = 1;
+ delta = -delta;
+ }
+
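+ /* delta is in parts per billion: scale the nominal multiplier by
+ * delta / 10^9 to obtain the frequency adjustment.
+ */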
+ adj = clock->nominal_c_mult;
+ adj *= delta;
+ diff = div_u64(adj, 1000000000ULL);
+
+ write_lock_irqsave(&clock->lock, flags);
+ timecounter_read(&clock->tc);
+ clock->cycles.mult = neg_adj ? clock->nominal_c_mult - diff :
+ clock->nominal_c_mult + diff;
+ write_unlock_irqrestore(&clock->lock, flags);
+
+ return 0;
+}
+
+static int mlx5_extts_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5_clock *clock =
+ container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev =
+ container_of(clock, struct mlx5_core_dev, clock);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ u32 field_select = 0;
+ u8 pin_mode = 0;
+ u8 pattern = 0;
+ int pin = -1;
+ int err = 0;
+
+ if (!MLX5_PPS_CAP(mdev))
+ return -EOPNOTSUPP;
+
+ if (rq->extts.index >= clock->ptp_info.n_pins)
+ return -EINVAL;
+
+ if (on) {
+ pin = ptp_find_pin(clock->ptp, PTP_PF_EXTTS, rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ pin_mode = MLX5_PIN_MODE_IN;
+ pattern = !!(rq->extts.flags & PTP_FALLING_EDGE);
+ field_select = MLX5_MTPPS_FS_PIN_MODE |
+ MLX5_MTPPS_FS_PATTERN |
+ MLX5_MTPPS_FS_ENABLE;
+ } else {
+ pin = rq->extts.index;
+ field_select = MLX5_MTPPS_FS_ENABLE;
+ }
+
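+ /* field_select is a bitmask telling the device which MTPPS fields
+ * are modified by this write.
+ */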
+ MLX5_SET(mtpps_reg, in, pin, pin);
+ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+ MLX5_SET(mtpps_reg, in, pattern, pattern);
+ MLX5_SET(mtpps_reg, in, enable, on);
+ MLX5_SET(mtpps_reg, in, field_select, field_select);
+
+ err = mlx5_set_mtpps(mdev, in, sizeof(in));
+ if (err)
+ return err;
+
+ return mlx5_set_mtppse(mdev, pin, 0,
+ MLX5_EVENT_MODE_REPETITIVE & on);
+}
+
+static int mlx5_perout_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5_clock *clock =
+ container_of(ptp, struct mlx5_clock, ptp_info);
+ struct mlx5_core_dev *mdev =
+ container_of(clock, struct mlx5_core_dev, clock);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ u64 nsec_now, nsec_delta, time_stamp = 0;
+ u64 cycles_now, cycles_delta;
+ struct timespec64 ts;
+ unsigned long flags;
+ u32 field_select = 0;
+ u8 pin_mode = 0;
+ u8 pattern = 0;
+ int pin = -1;
+ int err = 0;
+ s64 ns;
+
+ if (!MLX5_PPS_CAP(mdev))
+ return -EOPNOTSUPP;
+
+ if (rq->perout.index >= clock->ptp_info.n_pins)
+ return -EINVAL;
+
+ if (on) {
+ pin = ptp_find_pin(clock->ptp, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+
+ pin_mode = MLX5_PIN_MODE_OUT;
+ pattern = MLX5_OUT_PATTERN_PERIODIC;
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ ns = timespec64_to_ns(&ts);
+
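+ /* Only a 1 Hz periodic output (1 s period) is supported. */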
+ if ((ns >> 1) != 500000000LL)
+ return -EINVAL;
+
+ ts.tv_sec = rq->perout.start.sec;
+ ts.tv_nsec = rq->perout.start.nsec;
+ ns = timespec64_to_ns(&ts);
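+ /* Translate the requested start time into a raw internal-timer
+ * value: current cycles plus the nanosecond delta converted via the
+ * clock's mult/shift.
+ */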
+ cycles_now = mlx5_read_internal_timer(mdev);
+ write_lock_irqsave(&clock->lock, flags);
+ nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
+ nsec_delta = ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
+ clock->cycles.mult);
+ write_unlock_irqrestore(&clock->lock, flags);
+ time_stamp = cycles_now + cycles_delta;
+ field_select = MLX5_MTPPS_FS_PIN_MODE |
+ MLX5_MTPPS_FS_PATTERN |
+ MLX5_MTPPS_FS_ENABLE |
+ MLX5_MTPPS_FS_TIME_STAMP;
+ } else {
+ pin = rq->perout.index;
+ field_select = MLX5_MTPPS_FS_ENABLE;
+ }
+
+ MLX5_SET(mtpps_reg, in, pin, pin);
+ MLX5_SET(mtpps_reg, in, pin_mode, pin_mode);
+ MLX5_SET(mtpps_reg, in, pattern, pattern);
+ MLX5_SET(mtpps_reg, in, enable, on);
+ MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
+ MLX5_SET(mtpps_reg, in, field_select, field_select);
+
+ err = mlx5_set_mtpps(mdev, in, sizeof(in));
+ if (err)
+ return err;
+
+ return mlx5_set_mtppse(mdev, pin, 0,
+ MLX5_EVENT_MODE_REPETITIVE & on);
+}
+
+static int mlx5_pps_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5_clock *clock =
+ container_of(ptp, struct mlx5_clock, ptp_info);
+
+ clock->pps_info.enabled = !!on;
+ return 0;
+}
+
+static int mlx5_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return mlx5_extts_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PEROUT:
+ return mlx5_perout_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PPS:
+ return mlx5_pps_configure(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int mlx5_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+}
+
+static const struct ptp_clock_info mlx5_ptp_clock_info = {
+ .owner = THIS_MODULE,
+ .name = "mlx5_p2p",
+ .max_adj = 100000000,
+ .n_alarm = 0,
+ .n_ext_ts = 0,
+ .n_per_out = 0,
+ .n_pins = 0,
+ .pps = 0,
+ .adjfreq = mlx5_ptp_adjfreq,
+ .adjtime = mlx5_ptp_adjtime,
+ .gettime64 = mlx5_ptp_gettime,
+ .settime64 = mlx5_ptp_settime,
+ .enable = NULL,
+ .verify = NULL,
+};
+
+static int mlx5_init_pin_config(struct mlx5_clock *clock)
+{
+ int i;
+
+ clock->ptp_info.pin_config =
+ kzalloc(sizeof(*clock->ptp_info.pin_config) *
+ clock->ptp_info.n_pins, GFP_KERNEL);
+ if (!clock->ptp_info.pin_config)
+ return -ENOMEM;
+ clock->ptp_info.enable = mlx5_ptp_enable;
+ clock->ptp_info.verify = mlx5_ptp_verify;
+ clock->ptp_info.pps = 1;
+
+ for (i = 0; i < clock->ptp_info.n_pins; i++) {
+ snprintf(clock->ptp_info.pin_config[i].name,
+ sizeof(clock->ptp_info.pin_config[i].name),
+ "mlx5_pps%d", i);
+ clock->ptp_info.pin_config[i].index = i;
+ clock->ptp_info.pin_config[i].func = PTP_PF_NONE;
+ clock->ptp_info.pin_config[i].chan = i;
+ }
+
+ return 0;
+}
+
+static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+ mlx5_query_mtpps(mdev, out, sizeof(out));
+
+ clock->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
+ cap_number_of_pps_pins);
+ clock->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
+ cap_max_num_of_pps_in_pins);
+ clock->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
+ cap_max_num_of_pps_out_pins);
+
+ clock->pps_info.pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+ clock->pps_info.pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+ clock->pps_info.pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+ clock->pps_info.pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+ clock->pps_info.pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+ clock->pps_info.pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+ clock->pps_info.pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+ clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+}
+
+void mlx5_pps_event(struct mlx5_core_dev *mdev,
+ struct mlx5_eqe *eqe)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ struct ptp_clock_event ptp_event;
+ struct timespec64 ts;
+ u64 nsec_now, nsec_delta;
+ u64 cycles_now, cycles_delta;
+ int pin = eqe->data.pps.pin;
+ s64 ns;
+ unsigned long flags;
+
+ switch (clock->ptp_info.pin_config[pin].func) {
+ case PTP_PF_EXTTS:
+ if (clock->pps_info.enabled) {
+ ptp_event.type = PTP_CLOCK_PPSUSR;
+ ptp_event.pps_times.ts_real = ns_to_timespec64(eqe->data.pps.time_stamp);
+ } else {
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ }
+ ptp_clock_event(clock->ptp, &ptp_event);
+ break;
+ case PTP_PF_PEROUT:
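+ /* Re-arm the periodic output: compute the internal-timer value for
+ * the start of the next second and let out_work program it into the
+ * MTPPS register.
+ */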
+ mlx5_ptp_gettime(&clock->ptp_info, &ts);
+ cycles_now = mlx5_read_internal_timer(mdev);
+ ts.tv_sec += 1;
+ ts.tv_nsec = 0;
+ ns = timespec64_to_ns(&ts);
+ write_lock_irqsave(&clock->lock, flags);
+ nsec_now = timecounter_cyc2time(&clock->tc, cycles_now);
+ nsec_delta = ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << clock->cycles.shift,
+ clock->cycles.mult);
+ clock->pps_info.start[pin] = cycles_now + cycles_delta;
+ schedule_work(&clock->pps_info.out_work);
+ write_unlock_irqrestore(&clock->lock, flags);
+ break;
+ default:
+ mlx5_core_err(mdev, " Unhandled event\n");
+ }
+}
+
+void mlx5_init_clock(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+ u64 ns;
+ u64 frac = 0;
+ u32 dev_freq;
+
+ dev_freq = MLX5_CAP_GEN(mdev, device_frequency_khz);
+ if (!dev_freq) {
+ mlx5_core_warn(mdev, "invalid device_frequency_khz, aborting HW clock init\n");
+ return;
+ }
+ rwlock_init(&clock->lock);
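+ /* Wrap the device's free-running internal timer in a
+ * cyclecounter/timecounter pair so hardware timestamps can be
+ * converted to nanoseconds.
+ */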
+ clock->cycles.read = read_internal_timer;
+ clock->cycles.shift = MLX5_CYCLES_SHIFT;
+ clock->cycles.mult = clocksource_khz2mult(dev_freq,
+ clock->cycles.shift);
+ clock->nominal_c_mult = clock->cycles.mult;
+ clock->cycles.mask = CLOCKSOURCE_MASK(41);
+
+ timecounter_init(&clock->tc, &clock->cycles,
+ ktime_to_ns(ktime_get_real()));
+
+ /* Calculate period in seconds to call the overflow watchdog - to make
+ * sure counter is checked at least once every wrap around.
+ */
+ ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
+ frac, &frac);
+ do_div(ns, NSEC_PER_SEC / 2 / HZ);
+ clock->overflow_period = ns;
+
+ INIT_WORK(&clock->pps_info.out_work, mlx5_pps_out);
+ INIT_DELAYED_WORK(&clock->overflow_work, mlx5_timestamp_overflow);
+ if (clock->overflow_period)
+ schedule_delayed_work(&clock->overflow_work, 0);
+ else
+ mlx5_core_warn(mdev, "invalid overflow period, overflow_work is not scheduled\n");
+
+ /* Configure the PHC */
+ clock->ptp_info = mlx5_ptp_clock_info;
+
+ /* Initialize 1PPS data structures */
+ if (MLX5_PPS_CAP(mdev))
+ mlx5_get_pps_caps(mdev);
+ if (clock->ptp_info.n_pins)
+ mlx5_init_pin_config(clock);
+
+ clock->ptp = ptp_clock_register(&clock->ptp_info,
+ &mdev->pdev->dev);
+ if (IS_ERR(clock->ptp)) {
+ mlx5_core_warn(mdev, "ptp_clock_register failed %ld\n",
+ PTR_ERR(clock->ptp));
+ clock->ptp = NULL;
+ }
+}
+
+void mlx5_cleanup_clock(struct mlx5_core_dev *mdev)
+{
+ struct mlx5_clock *clock = &mdev->clock;
+
+ if (!MLX5_CAP_GEN(mdev, device_frequency_khz))
+ return;
+
+ if (clock->ptp) {
+ ptp_clock_unregister(clock->ptp);
+ clock->ptp = NULL;
+ }
+
+ cancel_work_sync(&clock->pps_info.out_work);
+ cancel_delayed_work_sync(&clock->overflow_work);
+ kfree(clock->ptp_info.pin_config);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
new file mode 100644
index 0000000..a8eeced
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __LIB_CLOCK_H__
+#define __LIB_CLOCK_H__
+
+void mlx5_init_clock(struct mlx5_core_dev *mdev);
+void mlx5_cleanup_clock(struct mlx5_core_dev *mdev);
+
+static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock,
+ u64 timestamp)
+{
+ u64 nsec;
+
+ read_lock(&clock->lock);
+ nsec = timecounter_cyc2time(&clock->tc, timestamp);
+ read_unlock(&clock->lock);
+
+ return ns_to_ktime(nsec);
+}
+
+#endif
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 06562c9..5f32344 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -59,6 +59,7 @@
#include "lib/mlx5.h"
#include "fpga/core.h"
#include "accel/ipsec.h"
+#include "lib/clock.h"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
@@ -889,6 +890,8 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_init_reserved_gids(dev);
+ mlx5_init_clock(dev);
+
err = mlx5_init_rl_table(dev);
if (err) {
dev_err(&pdev->dev, "Failed to init rate limiting\n");
@@ -949,6 +952,7 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
mlx5_eswitch_cleanup(dev->priv.eswitch);
mlx5_mpfs_cleanup(dev);
mlx5_cleanup_rl_table(dev);
+ mlx5_cleanup_clock(dev);
mlx5_cleanup_reserved_gids(dev);
mlx5_cleanup_mkey_table(dev);
mlx5_cleanup_srq_table(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index b7c2900..ff4a0b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -93,6 +93,7 @@ void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
unsigned long param);
void mlx5_core_page_fault(struct mlx5_core_dev *dev,
struct mlx5_pagefault *pfault);
+void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe);
void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force);
void mlx5_disable_device(struct mlx5_core_dev *dev);
@@ -121,6 +122,8 @@ int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
u8 access_reg_group);
int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
u8 access_reg_group);
+int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
+ u8 feature_group, u8 access_reg_group);
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index e07061f..c37d00c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -98,6 +98,18 @@ int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group,
return mlx5_core_access_reg(dev, in, sz, mcam, sz, MLX5_REG_MCAM, 0, 0);
}
+int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
+ u8 feature_group, u8 access_reg_group)
+{
+ u32 in[MLX5_ST_SZ_DW(qcam_reg)] = {};
+ int sz = MLX5_ST_SZ_BYTES(qcam_reg);
+
+ MLX5_SET(qcam_reg, in, feature_group, feature_group);
+ MLX5_SET(qcam_reg, in, access_reg_group, access_reg_group);
+
+ return mlx5_core_access_reg(mdev, in, sz, qcam, sz, MLX5_REG_QCAM, 0, 0);
+}
+
struct mlx5_reg_pcap {
u8 rsvd0;
u8 port_num;
@@ -959,3 +971,102 @@ int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode)
return mlx5_core_access_reg(mdev, in, sizeof(in), out,
sizeof(out), MLX5_REG_MTPPSE, 0, 1);
}
+
+int mlx5_set_trust_state(struct mlx5_core_dev *mdev, u8 trust_state)
+{
+ u32 out[MLX5_ST_SZ_DW(qpts_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(qpts_reg)] = {};
+ int err;
+
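+ /* QPTS (QoS Port Trust State) selects whether packet priority is
+ * taken from the L2 PCP bits or the L3 DSCP field.
+ */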
+ MLX5_SET(qpts_reg, in, local_port, 1);
+ MLX5_SET(qpts_reg, in, trust_state, trust_state);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_QPTS, 0, 1);
+ return err;
+}
+
+int mlx5_query_trust_state(struct mlx5_core_dev *mdev, u8 *trust_state)
+{
+ u32 out[MLX5_ST_SZ_DW(qpts_reg)] = {};
+ u32 in[MLX5_ST_SZ_DW(qpts_reg)] = {};
+ int err;
+
+ MLX5_SET(qpts_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_QPTS, 0, 0);
+ if (!err)
+ *trust_state = MLX5_GET(qpts_reg, out, trust_state);
+
+ return err;
+}
+
+int mlx5_set_dscp2prio(struct mlx5_core_dev *mdev, u8 dscp, u8 prio)
+{
+ int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
+ void *qpdpm_dscp;
+ void *out;
+ void *in;
+ int err;
+
+ in = kzalloc(sz, GFP_KERNEL);
+ out = kzalloc(sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
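+ /* Read-modify-write: query the whole QPDPM register, change only
+ * the requested DSCP entry and write it back.
+ */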
+ MLX5_SET(qpdpm_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0);
+ if (err)
+ goto out;
+
+ memcpy(in, out, sz);
+ MLX5_SET(qpdpm_reg, in, local_port, 1);
+
+ /* Update the corresponding dscp entry */
+ qpdpm_dscp = MLX5_ADDR_OF(qpdpm_reg, in, dscp[dscp]);
+ MLX5_SET16(qpdpm_dscp_reg, qpdpm_dscp, prio, prio);
+ MLX5_SET16(qpdpm_dscp_reg, qpdpm_dscp, e, 1);
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 1);
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}
+
+/* dscp2prio[i]: the priority that DSCP value i is mapped to */
+#define MLX5E_SUPPORTED_DSCP 64
+int mlx5_query_dscp2prio(struct mlx5_core_dev *mdev, u8 *dscp2prio)
+{
+ int sz = MLX5_ST_SZ_BYTES(qpdpm_reg);
+ void *qpdpm_dscp;
+ void *out;
+ void *in;
+ int err;
+ int i;
+
+ in = kzalloc(sz, GFP_KERNEL);
+ out = kzalloc(sz, GFP_KERNEL);
+ if (!in || !out) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ MLX5_SET(qpdpm_reg, in, local_port, 1);
+ err = mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_QPDPM, 0, 0);
+ if (err)
+ goto out;
+
+ for (i = 0; i < (MLX5E_SUPPORTED_DSCP); i++) {
+ qpdpm_dscp = MLX5_ADDR_OF(qpdpm_reg, out, dscp[i]);
+ dscp2prio[i] = MLX5_GET16(qpdpm_dscp_reg, qpdpm_dscp, prio);
+ }
+
+out:
+ kfree(in);
+ kfree(out);
+ return err;
+}