author    Daniel Vetter <daniel.vetter@ffwll.ch>  2016-10-12 08:22:25 +0200
committer Daniel Vetter <daniel.vetter@ffwll.ch>  2016-10-12 08:22:25 +0200
commit    c0c8b9ed1b0c14d91ff73624df6d918bb47c8a5e (patch)
tree      7fb6f2c7c028cd4926b827d3b5937c49afefa63a /drivers/net/ethernet
parent    5a920b85f2c6e3fd7d9dd9bb3f3345e9085e2360 (diff)
parent    69405d3da98b48633b78a49403e4f9cdb7c6a0f5 (diff)
Merge tag 'drm-for-v4.9' into drm-intel-next-queued
It's been over two months, git definitely lost its marbles. Conflicts resolved by picking our version, plus manually checking the diff with the parent in drm-intel-next-queued to make sure git didn't do anything stupid. It did, so I removed 2 occasions where it double-inserted a bit of code. The diff is now just:

- kernel-doc changes
- drm format/name changes
- display-info changes

so it all looks reasonable.

Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers/net/ethernet')
 drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 2
 drivers/net/ethernet/arc/emac_main.c | 6
 drivers/net/ethernet/atheros/alx/main.c | 2
 drivers/net/ethernet/atheros/alx/reg.h | 1
 drivers/net/ethernet/broadcom/bgmac-bcma.c | 2
 drivers/net/ethernet/broadcom/bnx2.c | 12
 drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 19
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 4
 drivers/net/ethernet/broadcom/tg3.c | 16
 drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 57
 drivers/net/ethernet/cadence/macb.c | 23
 drivers/net/ethernet/cadence/macb.h | 2
 drivers/net/ethernet/cavium/thunder/nic.h | 1
 drivers/net/ethernet/cavium/thunder/nic_main.c | 11
 drivers/net/ethernet/cavium/thunder/nic_reg.h | 1
 drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 5
 drivers/net/ethernet/cavium/thunder/nicvf_main.c | 20
 drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 64
 drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 4
 drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 25
 drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 7
 drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 6
 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 15
 drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 9
 drivers/net/ethernet/davicom/dm9000.c | 8
 drivers/net/ethernet/freescale/gianfar.c | 20
 drivers/net/ethernet/freescale/gianfar.h | 3
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c | 2
 drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c | 3
 drivers/net/ethernet/ibm/emac/core.c | 34
 drivers/net/ethernet/intel/e1000e/82571.c | 6
 drivers/net/ethernet/intel/e1000e/e1000.h | 1
 drivers/net/ethernet/intel/e1000e/ich8lan.c | 3
 drivers/net/ethernet/intel/e1000e/netdev.c | 66
 drivers/net/ethernet/intel/i40e/i40e_client.c | 41
 drivers/net/ethernet/intel/i40e/i40e_main.c | 51
 drivers/net/ethernet/intel/igb/igb_ptp.c | 26
 drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 4
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 23
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 132
 drivers/net/ethernet/mediatek/mtk_eth_soc.h | 22
 drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c | 57
 drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 21
 drivers/net/ethernet/mellanox/mlx4/en_tx.c | 12
 drivers/net/ethernet/mellanox/mlx4/eq.c | 4
 drivers/net/ethernet/mellanox/mlx4/main.c | 3
 drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 15
 drivers/net/ethernet/mellanox/mlx4/port.c | 4
 drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 85
 drivers/net/ethernet/mellanox/mlx5/core/en.h | 21
 drivers/net/ethernet/mellanox/mlx5/core/en_common.c | 7
 drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c | 21
 drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 101
 drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 112
 drivers/net/ethernet/mellanox/mlx5/core/en_rep.c | 5
 drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 63
 drivers/net/ethernet/mellanox/mlx5/core/en_stats.h | 4
 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 14
 drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 68
 drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 6
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.c | 17
 drivers/net/ethernet/mellanox/mlx5/core/eswitch.h | 1
 drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c | 46
 drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c | 4
 drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 2
 drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c | 11
 drivers/net/ethernet/mellanox/mlx5/core/main.c | 59
 drivers/net/ethernet/mellanox/mlxsw/port.h | 1
 drivers/net/ethernet/mellanox/mlxsw/reg.h | 11
 drivers/net/ethernet/mellanox/mlxsw/spectrum.c | 184
 drivers/net/ethernet/mellanox/mlxsw/spectrum.h | 4
 drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c | 14
 drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c | 10
 drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c | 165
 drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c | 70
 drivers/net/ethernet/mellanox/mlxsw/trap.h | 4
 drivers/net/ethernet/netronome/nfp/nfp_net_common.c | 13
 drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c | 1
 drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c | 7
 drivers/net/ethernet/nxp/lpc_eth.c | 20
 drivers/net/ethernet/qlogic/qed/qed.h | 11
 drivers/net/ethernet/qlogic/qed/qed_dcbx.c | 241
 drivers/net/ethernet/qlogic/qed/qed_hsi.h | 8
 drivers/net/ethernet/qlogic/qed/qed_mcp.c | 4
 drivers/net/ethernet/qlogic/qede/qede_main.c | 8
 drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 4
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 2
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 9
 drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 95
 drivers/net/ethernet/realtek/8139cp.c | 2
 drivers/net/ethernet/renesas/sh_eth.c | 7
 drivers/net/ethernet/sfc/ef10.c | 3
 drivers/net/ethernet/smsc/smc91x.c | 7
 drivers/net/ethernet/smsc/smc91x.h | 67
 drivers/net/ethernet/smsc/smsc911x.c | 213
 drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c | 2
 drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c | 2
 drivers/net/ethernet/synopsys/dwc_eth_qos.c | 55
 drivers/net/ethernet/tehuti/tehuti.c | 2
 drivers/net/ethernet/ti/cpsw.c | 2
 drivers/net/ethernet/tundra/tsi108_eth.c | 2
 drivers/net/ethernet/xilinx/xilinx_emaclite.c | 8
 102 files changed, 1752 insertions(+), 1028 deletions(-)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 37a0f46..18bb955 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -793,6 +793,8 @@ int xgene_enet_phy_connect(struct net_device *ndev)
netdev_err(ndev, "Could not connect to PHY\n");
return -ENODEV;
}
+#else
+ return -ENODEV;
#endif
}
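
The xgene fix above is the classic case of a function whose only return statements live inside an #ifdef CONFIG_ACPI block: with ACPI compiled out, control would run off the end of a non-void function. A minimal standalone sketch of the pattern, with a made-up config symbol and helper name (nothing here is from the driver):

#include <errno.h>

/* EXAMPLE_CONFIG_ACPI stands in for the real CONFIG_ACPI Kconfig symbol */
#undef EXAMPLE_CONFIG_ACPI

static int connect_phy_example(void)
{
#ifdef EXAMPLE_CONFIG_ACPI
	return acpi_connect_example();	/* hypothetical helper, compiled out here */
#else
	return -ENODEV;			/* the fallback the hunk above adds */
#endif
}

int main(void)
{
	return connect_phy_example() == -ENODEV ? 0 : 1;
}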
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index 4bff0f3..b0da969 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -771,8 +771,10 @@ int arc_emac_probe(struct net_device *ndev, int interface)
priv->dev = dev;
priv->regs = devm_ioremap_resource(dev, &res_regs);
- if (IS_ERR(priv->regs))
- return PTR_ERR(priv->regs);
+ if (IS_ERR(priv->regs)) {
+ err = PTR_ERR(priv->regs);
+ goto out_put_node;
+ }
dev_dbg(dev, "Registers base address is 0x%p\n", priv->regs);
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 6453148..4eb17da 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1545,6 +1545,8 @@ static const struct pci_device_id alx_pci_tbl[] = {
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2400),
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
+ { PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_E2500),
+ .driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8162),
.driver_data = ALX_DEV_QUIRK_MSI_INTX_DISABLE_BUG },
{ PCI_VDEVICE(ATTANSIC, ALX_DEV_ID_AR8171) },
diff --git a/drivers/net/ethernet/atheros/alx/reg.h b/drivers/net/ethernet/atheros/alx/reg.h
index 0959e68..1fc2d85 100644
--- a/drivers/net/ethernet/atheros/alx/reg.h
+++ b/drivers/net/ethernet/atheros/alx/reg.h
@@ -38,6 +38,7 @@
#define ALX_DEV_ID_AR8161 0x1091
#define ALX_DEV_ID_E2200 0xe091
#define ALX_DEV_ID_E2400 0xe0a1
+#define ALX_DEV_ID_E2500 0xe0b1
#define ALX_DEV_ID_AR8162 0x1090
#define ALX_DEV_ID_AR8171 0x10A1
#define ALX_DEV_ID_AR8172 0x10A0
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 9a9745c4..625235d 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -159,7 +159,7 @@ static int bgmac_probe(struct bcma_device *core)
if (!bgmac_is_bcm4707_family(core)) {
mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
- if (!IS_ERR(mii_bus)) {
+ if (IS_ERR(mii_bus)) {
err = PTR_ERR(mii_bus);
goto err;
}
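
The bgmac change flips an inverted error test: bcma_mdio_mii_register() returns an ERR_PTR-encoded pointer, so failure is IS_ERR(), not !IS_ERR(). A self-contained sketch of that convention, with IS_ERR/PTR_ERR re-implemented in simplified form for illustration (the real definitions live in include/linux/err.h):

#include <stdio.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *mii_bus = ERR_PTR(-19);	/* -ENODEV, a plausible failure value */

	if (IS_ERR(mii_bus))		/* the corrected test from the hunk above */
		printf("mii register failed: %ld\n", PTR_ERR(mii_bus));
	return 0;
}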
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 8fc3f3c..505ceaf 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6356,10 +6356,6 @@ bnx2_open(struct net_device *dev)
struct bnx2 *bp = netdev_priv(dev);
int rc;
- rc = bnx2_request_firmware(bp);
- if (rc < 0)
- goto out;
-
netif_carrier_off(dev);
bnx2_disable_int(bp);
@@ -6428,7 +6424,6 @@ open_err:
bnx2_free_irq(bp);
bnx2_free_mem(bp);
bnx2_del_napi(bp);
- bnx2_release_firmware(bp);
goto out;
}
@@ -8575,6 +8570,12 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
+ rc = bnx2_request_firmware(bp);
+ if (rc < 0)
+ goto error;
+
+
+ bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
memcpy(dev->dev_addr, bp->mac_addr, ETH_ALEN);
dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
@@ -8607,6 +8608,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
error:
+ bnx2_release_firmware(bp);
pci_iounmap(pdev, bp->regview);
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 97e8925..fa3386b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -772,6 +772,11 @@ void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
(bp->common.bc_ver & 0xff00) >> 8,
(bp->common.bc_ver & 0xff));
+ if (pci_channel_offline(bp->pdev)) {
+ BNX2X_ERR("Cannot dump MCP info while in PCI error\n");
+ return;
+ }
+
val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
@@ -9415,10 +9420,16 @@ unload_error:
/* Release IRQs */
bnx2x_free_irq(bp);
- /* Reset the chip */
- rc = bnx2x_reset_hw(bp, reset_code);
- if (rc)
- BNX2X_ERR("HW_RESET failed\n");
+ /* Reset the chip, unless the PCI function is offline. If we reach this
+ * point following PCI error handling, it means the device is really
+ * in a bad state and we're about to remove it, so resetting the chip
+ * is not a good idea.
+ */
+ if (!pci_channel_offline(bp->pdev)) {
+ rc = bnx2x_reset_hw(bp, reset_code);
+ if (rc)
+ BNX2X_ERR("HW_RESET failed\n");
+ }
/* Report UNLOAD_DONE to MCP */
bnx2x_send_unload_done(bp, keep_link);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2cf7910..228c964 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -353,8 +353,8 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev)
push_len = (length + sizeof(*tx_push) + 7) / 8;
if (push_len > 16) {
__iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
- __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
- push_len - 16);
+ __iowrite32_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
+ (push_len - 16) << 1);
} else {
__iowrite64_copy(txr->tx_doorbell, tx_push_buf,
push_len);
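
The bnxt fix is a units bug: push_len counts 8-byte words, and __iowrite64_copy() takes its count in 64-bit words while __iowrite32_copy() takes 32-bit words, so the tail after the first 16 qwords needs twice as many dwords. A quick arithmetic check with invented sizes (sizeof(*tx_push) is assumed to be 16 here):

#include <stdio.h>

int main(void)
{
	unsigned int length = 160, hdr = 16;		/* example packet + header */
	unsigned int push_len = (length + hdr + 7) / 8;	/* 22 qwords total */
	unsigned int tail_qwords = push_len - 16;	/* 6 qwords past the first copy */
	unsigned int tail_dwords = tail_qwords << 1;	/* 12 dwords: the corrected count */

	printf("tail: %u qwords == %u dwords\n", tail_qwords, tail_dwords);
	return 0;
}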
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index ff300f7..a2551bc 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -12552,10 +12552,6 @@ static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
info->data = TG3_RSS_MAX_NUM_QS;
}
- /* The first interrupt vector only
- * handles link interrupts.
- */
- info->data -= 1;
return 0;
default:
@@ -14014,7 +14010,9 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
}
if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
+ (!ec->rx_coalesce_usecs) ||
(ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
+ (!ec->tx_coalesce_usecs) ||
(ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
(ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
(ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
@@ -14025,16 +14023,6 @@ static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
(ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
return -EINVAL;
- /* No rx interrupts will be generated if both are zero */
- if ((ec->rx_coalesce_usecs == 0) &&
- (ec->rx_max_coalesced_frames == 0))
- return -EINVAL;
-
- /* No tx interrupts will be generated if both are zero */
- if ((ec->tx_coalesce_usecs == 0) &&
- (ec->tx_max_coalesced_frames == 0))
- return -EINVAL;
-
/* Only copy relevant parameters, ignore all others. */
tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 0e4fdc3..31f61a7 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -31,15 +31,10 @@
#define BNAD_NUM_TXF_COUNTERS 12
#define BNAD_NUM_RXF_COUNTERS 10
#define BNAD_NUM_CQ_COUNTERS (3 + 5)
-#define BNAD_NUM_RXQ_COUNTERS 6
+#define BNAD_NUM_RXQ_COUNTERS 7
#define BNAD_NUM_TXQ_COUNTERS 5
-#define BNAD_ETHTOOL_STATS_NUM \
- (sizeof(struct rtnl_link_stats64) / sizeof(u64) + \
- sizeof(struct bnad_drv_stats) / sizeof(u64) + \
- offsetof(struct bfi_enet_stats, rxf_stats[0]) / sizeof(u64))
-
-static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
+static const char *bnad_net_stats_strings[] = {
"rx_packets",
"tx_packets",
"rx_bytes",
@@ -50,22 +45,10 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"tx_dropped",
"multicast",
"collisions",
-
"rx_length_errors",
- "rx_over_errors",
"rx_crc_errors",
"rx_frame_errors",
- "rx_fifo_errors",
- "rx_missed_errors",
-
- "tx_aborted_errors",
- "tx_carrier_errors",
"tx_fifo_errors",
- "tx_heartbeat_errors",
- "tx_window_errors",
-
- "rx_compressed",
- "tx_compressed",
"netif_queue_stop",
"netif_queue_wakeup",
@@ -254,6 +237,8 @@ static const char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
"fc_tx_fid_parity_errors",
};
+#define BNAD_ETHTOOL_STATS_NUM ARRAY_SIZE(bnad_net_stats_strings)
+
static int
bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
{
@@ -658,6 +643,8 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_allocbuf_failed", q_num);
string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_mapbuf_failed", q_num);
+ string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_producer_index", q_num);
string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_consumer_index", q_num);
@@ -678,6 +665,9 @@ bnad_get_strings(struct net_device *netdev, u32 stringset, u8 *string)
sprintf(string, "rxq%d_allocbuf_failed",
q_num);
string += ETH_GSTRING_LEN;
+ sprintf(string, "rxq%d_mapbuf_failed",
+ q_num);
+ string += ETH_GSTRING_LEN;
sprintf(string, "rxq%d_producer_index",
q_num);
string += ETH_GSTRING_LEN;
@@ -854,9 +844,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
u64 *buf)
{
struct bnad *bnad = netdev_priv(netdev);
- int i, j, bi;
+ int i, j, bi = 0;
unsigned long flags;
- struct rtnl_link_stats64 *net_stats64;
+ struct rtnl_link_stats64 net_stats64;
u64 *stats64;
u32 bmap;
@@ -871,14 +861,25 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
* under the same lock
*/
spin_lock_irqsave(&bnad->bna_lock, flags);
- bi = 0;
- memset(buf, 0, stats->n_stats * sizeof(u64));
-
- net_stats64 = (struct rtnl_link_stats64 *)buf;
- bnad_netdev_qstats_fill(bnad, net_stats64);
- bnad_netdev_hwstats_fill(bnad, net_stats64);
- bi = sizeof(*net_stats64) / sizeof(u64);
+ memset(&net_stats64, 0, sizeof(net_stats64));
+ bnad_netdev_qstats_fill(bnad, &net_stats64);
+ bnad_netdev_hwstats_fill(bnad, &net_stats64);
+
+ buf[bi++] = net_stats64.rx_packets;
+ buf[bi++] = net_stats64.tx_packets;
+ buf[bi++] = net_stats64.rx_bytes;
+ buf[bi++] = net_stats64.tx_bytes;
+ buf[bi++] = net_stats64.rx_errors;
+ buf[bi++] = net_stats64.tx_errors;
+ buf[bi++] = net_stats64.rx_dropped;
+ buf[bi++] = net_stats64.tx_dropped;
+ buf[bi++] = net_stats64.multicast;
+ buf[bi++] = net_stats64.collisions;
+ buf[bi++] = net_stats64.rx_length_errors;
+ buf[bi++] = net_stats64.rx_crc_errors;
+ buf[bi++] = net_stats64.rx_frame_errors;
+ buf[bi++] = net_stats64.tx_fifo_errors;
/* Get netif_queue_stopped from stack */
bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
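
The bnad_ethtool rework replaces a hand-computed BNAD_ETHTOOL_STATS_NUM with ARRAY_SIZE over the strings table, so the count can never drift from the table again. A tiny sketch of why that idiom is safer (table contents abbreviated):

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *stats_strings[] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
};

int main(void)
{
	/* adding or removing a row updates the count automatically */
	printf("%zu stats\n", ARRAY_SIZE(stats_strings));
	return 0;
}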
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 89c0cfa..d954a97 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1323,6 +1323,24 @@ dma_error:
return 0;
}
+static inline int macb_clear_csum(struct sk_buff *skb)
+{
+ /* no change for packets without checksum offloading */
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ /* make sure we can modify the header */
+ if (unlikely(skb_cow_head(skb, 0)))
+ return -1;
+
+ /* initialize checksum field
+ * This is required - at least for Zynq, which otherwise calculates
+ * wrong UDP header checksums for UDP packets with UDP data len <=2
+ */
+ *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
+ return 0;
+}
+
static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
u16 queue_index = skb_get_queue_mapping(skb);
@@ -1362,6 +1380,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
return NETDEV_TX_BUSY;
}
+ if (macb_clear_csum(skb)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
/* Map socket buffer for DMA transfer */
if (!macb_tx_map(bp, queue, skb)) {
dev_kfree_skb_any(skb);
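
macb_clear_csum() zeroes the transport checksum field in place for CHECKSUM_PARTIAL skbs; the field sits at skb_checksum_start(skb) + skb->csum_offset. A plain-buffer illustration of where those two bytes land, using the usual Ethernet + IPv4 + UDP offsets (assumed for the example, not taken from the driver):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	uint8_t frame[64];
	unsigned int csum_start = 14 + 20;	/* transport header after Ethernet + IPv4 */
	unsigned int csum_offset = 6;		/* UDP checksum field within that header */

	memset(frame, 0xff, sizeof(frame));
	/* mirrors: *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0; */
	memset(frame + csum_start + csum_offset, 0, 2);
	printf("udp csum bytes: %02x %02x\n",
	       frame[csum_start + csum_offset],
	       frame[csum_start + csum_offset + 1]);
	return 0;
}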
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 36893d8..b6fcf10 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -403,11 +403,11 @@
#define MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII 0x00000004
#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
#define MACB_CAPS_USRIO_DISABLED 0x00000010
+#define MACB_CAPS_JUMBO 0x00000020
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
#define MACB_CAPS_MACB_IS_GEM 0x80000000
-#define MACB_CAPS_JUMBO 0x00000010
/* Bit manipulation macros */
#define MACB_BIT(name) \
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index 83025bb..e29815d 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -279,6 +279,7 @@ struct nicvf {
u8 sqs_id;
bool sqs_mode;
bool hw_tso;
+ bool t88;
/* Receive buffer alloc */
u32 rb_page_offset;
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
index 16ed203..85cc782 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -251,9 +251,14 @@ static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
int lmac;
u64 lmac_cfg;
- /* Max value that can be set is 60 */
- if (size > 60)
- size = 60;
+ /* There is an issue in HW wherein, while sending GSO-sized
+ * pkts as part of TSO, if the pkt len falls below this size the
+ * NIC will zero-pad the packet and also update the IP total length.
+ * Hence set this value to less than the min pkt size of MAC+IP+TCP
+ * headers; BGX will do the padding to transmit a 64 byte pkt.
+ */
+ if (size > 52)
+ size = 52;
for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
index afb10e3..fab35a5 100644
--- a/drivers/net/ethernet/cavium/thunder/nic_reg.h
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -170,7 +170,6 @@
#define NIC_QSET_SQ_0_7_DOOR (0x010838)
#define NIC_QSET_SQ_0_7_STATUS (0x010840)
#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
-#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index d2d8ef2..ad4fddb 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -382,7 +382,10 @@ static void nicvf_get_regs(struct net_device *dev,
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
- p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+ /* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
+ * produces bus errors when read
+ */
+ p[i++] = 0;
p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a19e73f..3240349 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -513,6 +513,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
struct nicvf *nic = netdev_priv(netdev);
struct snd_queue *sq;
struct sq_hdr_subdesc *hdr;
+ struct sq_hdr_subdesc *tso_sqe;
sq = &nic->qs->sq[cqe_tx->sq_idx];
@@ -527,17 +528,21 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
- /* For TSO offloaded packets only one SQE will have a valid SKB */
if (skb) {
+ /* Check for dummy descriptor used for HW TSO offload on 88xx */
+ if (hdr->dont_send) {
+ /* Get actual TSO descriptors and free them */
+ tso_sqe =
+ (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+ nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+ }
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
prefetch(skb);
dev_consume_skb_any(skb);
sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
} else {
- /* In case of HW TSO, HW sends a CQE for each segment of a TSO
- * packet instead of a single CQE for the whole TSO packet
- * transmitted. Each of this CQE points to the same SQE, so
- * avoid freeing same SQE multiple times.
+ /* In case of SW TSO on 88xx, only the last segment will have
+ * an SKB attached, so just free SQEs here.
*/
if (!nic->hw_tso)
nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
@@ -1502,6 +1507,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct nicvf *nic;
int err, qcount;
+ u16 sdevid;
err = pci_enable_device(pdev);
if (err) {
@@ -1575,6 +1581,10 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pass1_silicon(nic->pdev))
nic->hw_tso = true;
+ pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
+ if (sdevid == 0xA134)
+ nic->t88 = true;
+
/* Check if this VF is in QS only mode */
if (nic->sqs_mode)
return 0;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index 0ff8e60..dda3ea3 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -938,6 +938,8 @@ static int nicvf_tso_count_subdescs(struct sk_buff *skb)
return num_edescs + sh->gso_segs;
}
+#define POST_CQE_DESC_COUNT 2
+
/* Get the number of SQ descriptors needed to xmit this skb */
static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
{
@@ -948,6 +950,10 @@ static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
return subdesc_cnt;
}
+ /* Dummy descriptors to get TSO pkt completion notification */
+ if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size)
+ subdesc_cnt += POST_CQE_DESC_COUNT;
+
if (skb_shinfo(skb)->nr_frags)
subdesc_cnt += skb_shinfo(skb)->nr_frags;
@@ -965,14 +971,21 @@ nicvf_sq_add_hdr_subdesc(struct nicvf *nic, struct snd_queue *sq, int qentry,
struct sq_hdr_subdesc *hdr;
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
- sq->skbuff[qentry] = (u64)skb;
-
memset(hdr, 0, SND_QUEUE_DESC_SIZE);
hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
- /* Enable notification via CQE after processing SQE */
- hdr->post_cqe = 1;
- /* No of subdescriptors following this */
- hdr->subdesc_cnt = subdesc_cnt;
+
+ if (nic->t88 && nic->hw_tso && skb_shinfo(skb)->gso_size) {
+ /* post_cqe = 0, to avoid HW posting a CQE for every TSO
+ * segment transmitted on 88xx.
+ */
+ hdr->subdesc_cnt = subdesc_cnt - POST_CQE_DESC_COUNT;
+ } else {
+ sq->skbuff[qentry] = (u64)skb;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* No of subdescriptors following this */
+ hdr->subdesc_cnt = subdesc_cnt;
+ }
hdr->tot_len = len;
/* Offload checksum calculation to HW */
@@ -1023,6 +1036,37 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
gather->addr = data;
}
+/* Add HDR + IMMEDIATE subdescriptors right after the descriptors of a TSO
+ * packet so that a CQE is posted as a notification for transmission of the
+ * TSO packet.
+ */
+static inline void nicvf_sq_add_cqe_subdesc(struct snd_queue *sq, int qentry,
+ int tso_sqe, struct sk_buff *skb)
+{
+ struct sq_imm_subdesc *imm;
+ struct sq_hdr_subdesc *hdr;
+
+ sq->skbuff[qentry] = (u64)skb;
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* There is no packet to transmit here */
+ hdr->dont_send = 1;
+ hdr->subdesc_cnt = POST_CQE_DESC_COUNT - 1;
+ hdr->tot_len = 1;
+ /* Actual TSO header SQE index, needed for cleanup */
+ hdr->rsvd2 = tso_sqe;
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ imm = (struct sq_imm_subdesc *)GET_SQ_DESC(sq, qentry);
+ memset(imm, 0, SND_QUEUE_DESC_SIZE);
+ imm->subdesc_type = SQ_DESC_TYPE_IMMEDIATE;
+ imm->len = 1;
+}
+
/* Segment a TSO packet into 'gso_size' segments and append
* them to SQ for transfer
*/
@@ -1096,7 +1140,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
{
int i, size;
- int subdesc_cnt;
+ int subdesc_cnt, tso_sqe = 0;
int sq_num, qentry;
struct queue_set *qs;
struct snd_queue *sq;
@@ -1131,6 +1175,7 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
/* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
skb, skb->len);
+ tso_sqe = qentry;
/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
@@ -1154,6 +1199,11 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
}
doorbell:
+ if (nic->t88 && skb_shinfo(skb)->gso_size) {
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+ }
+
/* make sure all memory stores are done before ringing doorbell */
smp_wmb();
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 2e2aa9f..edd2338 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -419,8 +419,8 @@ struct link_config {
unsigned short supported; /* link capabilities */
unsigned short advertising; /* advertised capabilities */
unsigned short lp_advertising; /* peer advertised capabilities */
- unsigned short requested_speed; /* speed user has requested */
- unsigned short speed; /* actual link speed */
+ unsigned int requested_speed; /* speed user has requested */
+ unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index c45de49..3ceafb55 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4305,10 +4305,17 @@ static const struct pci_error_handlers cxgb4_eeh = {
.resume = eeh_resume,
};
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+ unsigned int speeds, high_speeds;
+
+ speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+ high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+ return high_speeds != 0;
}
static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
@@ -4335,6 +4342,11 @@ static void cfg_queues(struct adapter *adap)
#endif
int ciq_size;
+ /* Reduce memory usage in kdump environment, disable all offload.
+ */
+ if (is_kdump_kernel())
+ adap->params.offload = 0;
+
for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
#ifdef CONFIG_CHELSIO_T4_DCB
@@ -4365,11 +4377,6 @@ static void cfg_queues(struct adapter *adap)
if (q10g > netif_get_num_default_rss_queues())
q10g = netif_get_num_default_rss_queues();
- /* Reduce memory usage in kdump environment, disable all offload.
- */
- if (is_kdump_kernel())
- adap->params.offload = 0;
-
for_each_port(adap, i) {
struct port_info *pi = adap2pinfo(adap, i);
@@ -4756,8 +4763,12 @@ static void print_port_info(const struct net_device *dev)
bufp += sprintf(bufp, "1000/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G)
+ bufp += sprintf(bufp, "25G/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
bufp += sprintf(bufp, "40G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G)
+ bufp += sprintf(bufp, "100G/");
if (bufp != buf)
--bufp;
sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index dc92c80..660204b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3627,7 +3627,8 @@ void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+ FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
FW_PORT_CAP_ANEG)
/**
@@ -7196,8 +7197,12 @@ void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+ speed = 25000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+ speed = 100000;
lc = &pi->link_cfg;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index a89b307..30507d4 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2265,6 +2265,12 @@ enum fw_port_cap {
FW_PORT_CAP_802_3_ASM_DIR = 0x8000,
};
+#define FW_PORT_CAP_SPEED_S 0
+#define FW_PORT_CAP_SPEED_M 0x3f
+#define FW_PORT_CAP_SPEED_V(x) ((x) << FW_PORT_CAP_SPEED_S)
+#define FW_PORT_CAP_SPEED_G(x) \
+ (((x) >> FW_PORT_CAP_SPEED_S) & FW_PORT_CAP_SPEED_M)
+
enum fw_port_mdi {
FW_PORT_CAP_MDI_UNCHANGED,
FW_PORT_CAP_MDI_AUTO,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 8ee5414..17a2bbc 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -108,8 +108,8 @@ struct link_config {
unsigned int supported; /* link capabilities */
unsigned int advertising; /* advertised capabilities */
unsigned short lp_advertising; /* peer advertised capabilities */
- unsigned short requested_speed; /* speed user has requested */
- unsigned short speed; /* actual link speed */
+ unsigned int requested_speed; /* speed user has requested */
+ unsigned int speed; /* actual link speed */
unsigned char requested_fc; /* flow control user has requested */
unsigned char fc; /* actual link flow control */
unsigned char autoneg; /* autonegotiating? */
@@ -271,10 +271,17 @@ static inline bool is_10g_port(const struct link_config *lc)
return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
+/* Return true if the Link Configuration supports "High Speeds" (those greater
+ * than 1Gb/s).
+ */
static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
- (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
+ unsigned int speeds, high_speeds;
+
+ speeds = FW_PORT_CAP_SPEED_V(FW_PORT_CAP_SPEED_G(lc->supported));
+ high_speeds = speeds & ~(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G);
+
+ return high_speeds != 0;
}
static inline unsigned int core_ticks_per_usec(const struct adapter *adapter)
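
Both cxgb4 and cxgb4vf now derive "high speed" from the full speed bitmask rather than checking 10G/40G bits individually: extract the speed field, mask off 100M and 1G, and anything left is faster than 1Gb/s. A worked example with stand-in bit values (the real FW_PORT_CAP_SPEED_* encodings live in t4fw_api.h):

#include <stdio.h>

#define SPEED_100M 0x01
#define SPEED_1G   0x02
#define SPEED_10G  0x04
#define SPEED_25G  0x08
#define SPEED_40G  0x10
#define SPEED_100G 0x20
#define SPEED_MASK 0x3f	/* mirrors FW_PORT_CAP_SPEED_M */

int main(void)
{
	unsigned int supported = SPEED_1G | SPEED_25G;	/* hypothetical port caps */
	unsigned int speeds = supported & SPEED_MASK;
	unsigned int high_speeds = speeds & ~(SPEED_100M | SPEED_1G);

	/* 25G survives the mask, so this port counts as "high speed" */
	printf("high speeds present: %s\n", high_speeds ? "yes" : "no");
	return 0;
}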
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 427bfa7..b5622b1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -314,8 +314,9 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
- FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_25G | \
+ FW_PORT_CAP_SPEED_40G | FW_PORT_CAP_SPEED_100G | \
+ FW_PORT_CAP_ANEG)
/**
* init_link_config - initialize a link's SW state
@@ -1712,8 +1713,12 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
+ speed = 25000;
else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
+ speed = 100000;
/*
* Scan all of our "ports" (Virtual Interfaces) looking for
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 1471e16..f45385f 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1299,6 +1299,7 @@ static int
dm9000_open(struct net_device *dev)
{
struct board_info *db = netdev_priv(dev);
+ unsigned int irq_flags = irq_get_trigger_type(dev->irq);
if (netif_msg_ifup(db))
dev_dbg(db->dev, "enabling %s\n", dev->name);
@@ -1306,9 +1307,11 @@ dm9000_open(struct net_device *dev)
/* If there is no IRQ type specified, tell the user that this is a
* problem
*/
- if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
+ if (irq_flags == IRQF_TRIGGER_NONE)
dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
+ irq_flags |= IRQF_SHARED;
+
/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
mdelay(1); /* delay needs by DM9000B */
@@ -1316,8 +1319,7 @@ dm9000_open(struct net_device *dev)
/* Initialize DM9000 board */
dm9000_init_dm9000(dev);
- if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
- dev->name, dev))
+ if (request_irq(dev->irq, dm9000_interrupt, irq_flags, dev->name, dev))
return -EAGAIN;
/* Now that we have an interrupt handler hooked up we can unmask
* our interrupts
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index d20935d..4b4f5bc 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2922,17 +2922,25 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
{
unsigned int size = lstatus & BD_LENGTH_MASK;
struct page *page = rxb->page;
+ bool last = !!(lstatus & BD_LFLAG(RXBD_LAST));
/* Remove the FCS from the packet length */
- if (likely(lstatus & BD_LFLAG(RXBD_LAST)))
+ if (last)
size -= ETH_FCS_LEN;
- if (likely(first))
+ if (likely(first)) {
skb_put(skb, size);
- else
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
- rxb->page_offset + RXBUF_ALIGNMENT,
- size, GFAR_RXB_TRUESIZE);
+ } else {
+ /* the last fragment's length contains the full frame length */
+ if (last)
+ size -= skb->len;
+
+ /* in case the last fragment consisted only of the FCS */
+ if (size > 0)
+ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
+ rxb->page_offset + RXBUF_ALIGNMENT,
+ size, GFAR_RXB_TRUESIZE);
+ }
/* try reuse page */
if (unlikely(page_count(page) != 1))
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 373fd09..6e8a9c8 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -100,7 +100,8 @@ extern const char gfar_driver_version[];
#define DEFAULT_RX_LFC_THR 16
#define DEFAULT_LFC_PTVVAL 4
-#define GFAR_RXB_SIZE 1536
+/* prevent fragmentation by HW in DSA environments */
+#define GFAR_RXB_SIZE roundup(1536 + 8, 64)
#define GFAR_SKBFRAG_SIZE (RXBUF_ALIGNMENT + GFAR_RXB_SIZE \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
#define GFAR_RXB_TRUESIZE 2048
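
The gianfar change grows the RX buffer so a frame carrying an 8-byte DSA tag still fits without the hardware splitting it, rounded up to a 64-byte boundary. The arithmetic, with roundup() re-expressed as a macro for the sake of a standalone check:

#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	printf("GFAR_RXB_SIZE = %d\n", ROUNDUP(1536 + 8, 64));	/* prints 1600 */
	return 0;
}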
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
index 1235c7f..1e1eb92 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_gmac.c
@@ -17,7 +17,7 @@ static const struct mac_stats_string g_gmac_stats_string[] = {
{"gmac_rx_octets_total_ok", MAC_STATS_FIELD_OFF(rx_good_bytes)},
{"gmac_rx_octets_bad", MAC_STATS_FIELD_OFF(rx_bad_bytes)},
{"gmac_rx_uc_pkts", MAC_STATS_FIELD_OFF(rx_uc_pkts)},
- {"gamc_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
+ {"gmac_rx_mc_pkts", MAC_STATS_FIELD_OFF(rx_mc_pkts)},
{"gmac_rx_bc_pkts", MAC_STATS_FIELD_OFF(rx_bc_pkts)},
{"gmac_rx_pkts_64octets", MAC_STATS_FIELD_OFF(rx_64bytes)},
{"gmac_rx_pkts_65to127", MAC_STATS_FIELD_OFF(rx_65to127)},
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
index ff8b6a4..6ea8722 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_ppe.c
@@ -328,9 +328,10 @@ static void hns_ppe_init_hw(struct hns_ppe_cb *ppe_cb)
static void hns_ppe_uninit_hw(struct hns_ppe_cb *ppe_cb)
{
u32 port;
- struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
if (ppe_cb->ppe_common_cb) {
+ struct dsaf_device *dsaf_dev = ppe_cb->ppe_common_cb->dsaf_dev;
+
port = ppe_cb->index;
dsaf_dev->misc_op->ppe_srst(dsaf_dev, port, 0);
}
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index 4c9771d..7af09cb 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -977,7 +977,37 @@ static void emac_set_multicast_list(struct net_device *ndev)
dev->mcast_pending = 1;
return;
}
+
+ mutex_lock(&dev->link_lock);
__emac_set_multicast_list(dev);
+ mutex_unlock(&dev->link_lock);
+}
+
+static int emac_set_mac_address(struct net_device *ndev, void *sa)
+{
+ struct emac_instance *dev = netdev_priv(ndev);
+ struct sockaddr *addr = sa;
+ struct emac_regs __iomem *p = dev->emacp;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ mutex_lock(&dev->link_lock);
+
+ memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
+
+ emac_rx_disable(dev);
+ emac_tx_disable(dev);
+ out_be32(&p->iahr, (ndev->dev_addr[0] << 8) | ndev->dev_addr[1]);
+ out_be32(&p->ialr, (ndev->dev_addr[2] << 24) |
+ (ndev->dev_addr[3] << 16) | (ndev->dev_addr[4] << 8) |
+ ndev->dev_addr[5]);
+ emac_tx_enable(dev);
+ emac_rx_enable(dev);
+
+ mutex_unlock(&dev->link_lock);
+
+ return 0;
}
static int emac_resize_rx_ring(struct emac_instance *dev, int new_mtu)
@@ -2686,7 +2716,7 @@ static const struct net_device_ops emac_netdev_ops = {
.ndo_do_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
.ndo_start_xmit = emac_start_xmit,
.ndo_change_mtu = eth_change_mtu,
};
@@ -2699,7 +2729,7 @@ static const struct net_device_ops emac_gige_netdev_ops = {
.ndo_do_ioctl = emac_ioctl,
.ndo_tx_timeout = emac_tx_timeout,
.ndo_validate_addr = eth_validate_addr,
- .ndo_set_mac_address = eth_mac_addr,
+ .ndo_set_mac_address = emac_set_mac_address,
.ndo_start_xmit = emac_start_xmit_sg,
.ndo_change_mtu = emac_change_mtu,
};
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 7fd4d54..6b03c85 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -2032,7 +2032,8 @@ const struct e1000_info e1000_82574_info = {
| FLAG2_DISABLE_ASPM_L0S
| FLAG2_DISABLE_ASPM_L1
| FLAG2_NO_DISABLE_RX
- | FLAG2_DMA_BURST,
+ | FLAG2_DMA_BURST
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
@@ -2053,7 +2054,8 @@ const struct e1000_info e1000_82583_info = {
| FLAG_HAS_CTRLEXT_ON_LOAD,
.flags2 = FLAG2_DISABLE_ASPM_L0S
| FLAG2_DISABLE_ASPM_L1
- | FLAG2_NO_DISABLE_RX,
+ | FLAG2_NO_DISABLE_RX
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 32,
.max_hw_frame_size = DEFAULT_JUMBO,
.get_variants = e1000_get_variants_82571,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index ef96cd1..879cca4 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -452,6 +452,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define FLAG2_PCIM2PCI_ARBITER_WA BIT(11)
#define FLAG2_DFLT_CRC_STRIPPING BIT(12)
#define FLAG2_CHECK_RX_HWTSTAMP BIT(13)
+#define FLAG2_CHECK_SYSTIM_OVERFLOW BIT(14)
#define E1000_RX_DESC_PS(R, i) \
(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index 3e11322..f3aaca7 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -5885,7 +5885,8 @@ const struct e1000_info e1000_pch_lpt_info = {
| FLAG_HAS_JUMBO_FRAMES
| FLAG_APME_IN_WUC,
.flags2 = FLAG2_HAS_PHY_STATS
- | FLAG2_HAS_EEE,
+ | FLAG2_HAS_EEE
+ | FLAG2_CHECK_SYSTIM_OVERFLOW,
.pba = 26,
.max_hw_frame_size = 9022,
.get_variants = e1000_get_variants_ich8lan,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 02f4439..7017281 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -4303,6 +4303,42 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter)
}
/**
+ * e1000e_sanitize_systim - sanitize raw cycle counter reads
+ * @hw: pointer to the HW structure
+ * @systim: cycle_t value read, sanitized and returned
+ *
+ * Errata for 82574/82583 possible bad bits read from SYSTIMH/L:
+ * check to see that the time is incrementing at a reasonable
+ * rate and is a multiple of incvalue.
+ **/
+static cycle_t e1000e_sanitize_systim(struct e1000_hw *hw, cycle_t systim)
+{
+ u64 time_delta, rem, temp;
+ cycle_t systim_next;
+ u32 incvalue;
+ int i;
+
+ incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+ for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+ /* latch SYSTIMH on read of SYSTIML */
+ systim_next = (cycle_t)er32(SYSTIML);
+ systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+ time_delta = systim_next - systim;
+ temp = time_delta;
+ /* VMWare users have seen incvalue of zero, don't div / 0 */
+ rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
+
+ systim = systim_next;
+
+ if ((time_delta < E1000_82574_SYSTIM_EPSILON) && (rem == 0))
+ break;
+ }
+
+ return systim;
+}
+
+/**
* e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
* @cc: cyclecounter structure
**/
@@ -4312,7 +4348,7 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
cc);
struct e1000_hw *hw = &adapter->hw;
u32 systimel, systimeh;
- cycle_t systim, systim_next;
+ cycle_t systim;
/* SYSTIMH latching upon SYSTIML read does not work well.
* This means that if SYSTIML overflows after we read it but before
* we read SYSTIMH, the value of SYSTIMH has been incremented and we
@@ -4335,33 +4371,9 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
systim = (cycle_t)systimel;
systim |= (cycle_t)systimeh << 32;
- if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
- u64 time_delta, rem, temp;
- u32 incvalue;
- int i;
-
- /* errata for 82574/82583 possible bad bits read from SYSTIMH/L
- * check to see that the time is incrementing at a reasonable
- * rate and is a multiple of incvalue
- */
- incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
- for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
- /* latch SYSTIMH on read of SYSTIML */
- systim_next = (cycle_t)er32(SYSTIML);
- systim_next |= (cycle_t)er32(SYSTIMH) << 32;
-
- time_delta = systim_next - systim;
- temp = time_delta;
- /* VMWare users have seen incvalue of zero, don't div / 0 */
- rem = incvalue ? do_div(temp, incvalue) : (time_delta != 0);
-
- systim = systim_next;
+ if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW)
+ systim = e1000e_sanitize_systim(hw, systim);
- if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
- (rem == 0))
- break;
- }
- }
return systim;
}
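
The e1000e refactor moves the 82574/82583 SYSTIM errata workaround into e1000e_sanitize_systim() and gates it behind FLAG2_CHECK_SYSTIM_OVERFLOW: a reread is accepted once the delta since the last read is small and an exact multiple of the timer increment. A numeric sketch of that acceptance test (all values invented; the real bounds come from TIMINCA and E1000_82574_SYSTIM_EPSILON):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t incvalue = 24;			/* assumed increment per tick */
	uint64_t epsilon = 12 * 24;		/* assumed plausibility bound */
	uint64_t systim = 1000000;
	uint64_t systim_next = systim + 5 * 24;	/* a clean, small advance */
	uint64_t delta = systim_next - systim;
	uint64_t rem = incvalue ? delta % incvalue : (delta != 0);

	if (delta < epsilon && rem == 0)
		printf("accepted: delta=%llu\n", (unsigned long long)delta);
	return 0;
}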
diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c
index e1370c5..618f184 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_client.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_client.c
@@ -199,6 +199,7 @@ void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi)
void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
{
struct i40e_client_instance *cdev;
+ int ret = 0;
if (!vsi)
return;
@@ -211,7 +212,14 @@ void i40e_notify_client_of_netdev_open(struct i40e_vsi *vsi)
"Cannot locate client instance open routine\n");
continue;
}
- cdev->client->ops->open(&cdev->lan_info, cdev->client);
+ if (!(test_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state))) {
+ ret = cdev->client->ops->open(&cdev->lan_info,
+ cdev->client);
+ if (!ret)
+ set_bit(__I40E_CLIENT_INSTANCE_OPENED,
+ &cdev->state);
+ }
}
}
mutex_unlock(&i40e_client_instance_mutex);
@@ -407,12 +415,14 @@ struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf,
* i40e_client_add_instance - add a client instance struct to the instance list
* @pf: pointer to the board struct
* @client: pointer to a client struct in the client list.
+ * @existing: if there was already an existing instance
*
- * Returns cdev ptr on success, NULL on failure
+ * Returns cdev ptr on success or if already exists, NULL on failure
**/
static
struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
- struct i40e_client *client)
+ struct i40e_client *client,
+ bool *existing)
{
struct i40e_client_instance *cdev;
struct netdev_hw_addr *mac = NULL;
@@ -421,7 +431,7 @@ struct i40e_client_instance *i40e_client_add_instance(struct i40e_pf *pf,
mutex_lock(&i40e_client_instance_mutex);
list_for_each_entry(cdev, &i40e_client_instances, list) {
if ((cdev->lan_info.pf == pf) && (cdev->client == client)) {
- cdev = NULL;
+ *existing = true;
goto out;
}
}
@@ -505,6 +515,7 @@ void i40e_client_subtask(struct i40e_pf *pf)
{
struct i40e_client_instance *cdev;
struct i40e_client *client;
+ bool existing = false;
int ret = 0;
if (!(pf->flags & I40E_FLAG_SERVICE_CLIENT_REQUESTED))
@@ -528,18 +539,25 @@ void i40e_client_subtask(struct i40e_pf *pf)
/* check if L2 VSI is up, if not we are not ready */
if (test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
continue;
+ } else {
+ dev_warn(&pf->pdev->dev, "This client %s is being instantiated at probe\n",
+ client->name);
}
/* Add the client instance to the instance list */
- cdev = i40e_client_add_instance(pf, client);
+ cdev = i40e_client_add_instance(pf, client, &existing);
if (!cdev)
continue;
- /* Also up the ref_cnt of no. of instances of this client */
- atomic_inc(&client->ref_cnt);
- dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
- client->name, pf->hw.pf_id,
- pf->hw.bus.device, pf->hw.bus.func);
+ if (!existing) {
+ /* Also up the ref_cnt for no. of instances of this
+ * client.
+ */
+ atomic_inc(&client->ref_cnt);
+ dev_info(&pf->pdev->dev, "Added instance of Client %s to PF%d bus=0x%02x func=0x%02x\n",
+ client->name, pf->hw.pf_id,
+ pf->hw.bus.device, pf->hw.bus.func);
+ }
/* Send an Open request to the client */
atomic_inc(&cdev->ref_cnt);
@@ -588,7 +606,8 @@ int i40e_lan_add_device(struct i40e_pf *pf)
pf->hw.pf_id, pf->hw.bus.device, pf->hw.bus.func);
/* Since in some cases register may have happened before a device gets
- * added, we can schedule a subtask to go initiate the clients.
+ * added, we can schedule a subtask to go initiate the clients if
+ * they can be launched at probe time.
*/
pf->flags |= I40E_FLAG_SERVICE_CLIENT_REQUESTED;
i40e_service_event_schedule(pf);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 81c99e1..d0b3a1b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -4554,23 +4554,38 @@ static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
**/
static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
{
+ int i, tc_unused = 0;
u8 num_tc = 0;
- int i;
+ u8 ret = 0;
/* Scan the ETS Config Priority Table to find
* traffic class enabled for a given priority
- * and use the traffic class index to get the
- * number of traffic classes enabled
+ * and create a bitmask of enabled TCs
*/
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- if (dcbcfg->etscfg.prioritytable[i] > num_tc)
- num_tc = dcbcfg->etscfg.prioritytable[i];
- }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++)
+ num_tc |= BIT(dcbcfg->etscfg.prioritytable[i]);
- /* Traffic class index starts from zero so
- * increment to return the actual count
+ /* Now scan the bitmask to check for
+ * contiguous TCs starting with TC0
*/
- return num_tc + 1;
+ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
+ if (num_tc & BIT(i)) {
+ if (!tc_unused) {
+ ret++;
+ } else {
+ pr_err("Non-contiguous TC - Disabling DCB\n");
+ return 1;
+ }
+ } else {
+ tc_unused = 1;
+ }
+ }
+
+ /* There is always at least TC0 */
+ if (!ret)
+ ret = 1;
+
+ return ret;
}
/**
@@ -5098,9 +5113,13 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
DCB_CAP_DCBX_VER_IEEE;
pf->flags |= I40E_FLAG_DCB_CAPABLE;
- /* Enable DCB tagging only when more than one TC */
+ /* Enable DCB tagging only when more than one TC
+ * or explicitly disable if only one TC
+ */
if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
dev_dbg(&pf->pdev->dev,
"DCBX offload is supported for this PF.\n");
}
@@ -5416,7 +5435,6 @@ int i40e_open(struct net_device *netdev)
wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);
udp_tunnel_get_rx_info(netdev);
- i40e_notify_client_of_netdev_open(vsi);
return 0;
}
@@ -5702,7 +5720,7 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
u8 type;
/* Not DCB capable or capability disabled */
- if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
return ret;
/* Ignore if event is not for Nearest Bridge */
@@ -7882,6 +7900,7 @@ static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
#endif
I40E_FLAG_RSS_ENABLED |
I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
@@ -10488,6 +10507,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
} else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
@@ -10511,7 +10531,8 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
/* Not enough queues for all TCs */
if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
(queues_left < I40E_MAX_TRAFFIC_CLASS)) {
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE |
+ I40E_FLAG_DCB_ENABLED);
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
pf->num_lan_qps = max_t(int, pf->rss_size_max,
@@ -10908,7 +10929,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
- pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ pf->flags &= ~(I40E_FLAG_DCB_CAPABLE & I40E_FLAG_DCB_ENABLED);
/* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
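
The rewritten i40e_dcb_get_num_tc() no longer trusts the highest TC index in the priority table; it builds a bitmask of referenced TCs and only counts them while the mask is contiguous from TC0, falling back to a single TC otherwise. The same logic as a standalone check, with a made-up priority table:

#include <stdio.h>

#define MAX_USER_PRIORITY 8
#define MAX_TRAFFIC_CLASS 8

int main(void)
{
	unsigned char prioritytable[MAX_USER_PRIORITY] = {0, 0, 1, 1, 2, 2, 0, 0};
	unsigned int num_tc = 0, ret = 0, tc_unused = 0, i;

	for (i = 0; i < MAX_USER_PRIORITY; i++)
		num_tc |= 1u << prioritytable[i];	/* mask is 0b111 here */

	for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
		if (num_tc & (1u << i)) {
			if (!tc_unused) {
				ret++;
			} else {
				ret = 1;	/* gap found: non-contiguous, disable DCB */
				break;
			}
		} else {
			tc_unused = 1;
		}
	}
	if (!ret)
		ret = 1;	/* there is always at least TC0 */
	printf("enabled TCs: %u\n", ret);	/* prints 3 */
	return 0;
}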
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index e61b647..336c103 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -744,7 +744,8 @@ static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
}
}
- shhwtstamps.hwtstamp = ktime_sub_ns(shhwtstamps.hwtstamp, adjust);
+ shhwtstamps.hwtstamp =
+ ktime_add_ns(shhwtstamps.hwtstamp, adjust);
skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps);
dev_kfree_skb_any(adapter->ptp_tx_skb);
@@ -767,13 +768,32 @@ void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
struct sk_buff *skb)
{
__le64 *regval = (__le64 *)va;
+ struct igb_adapter *adapter = q_vector->adapter;
+ int adjust = 0;
/* The timestamp is recorded in little endian format.
* DWORD: 0 1 2 3
* Field: Reserved Reserved SYSTIML SYSTIMH
*/
- igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+ igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb),
le64_to_cpu(regval[1]));
+
+ /* adjust timestamp for the RX latency based on link speed */
+ if (adapter->hw.mac.type == e1000_i210) {
+ switch (adapter->link_speed) {
+ case SPEED_10:
+ adjust = IGB_I210_RX_LATENCY_10;
+ break;
+ case SPEED_100:
+ adjust = IGB_I210_RX_LATENCY_100;
+ break;
+ case SPEED_1000:
+ adjust = IGB_I210_RX_LATENCY_1000;
+ break;
+ }
+ }
+ skb_hwtstamps(skb)->hwtstamp =
+ ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
}
/**
@@ -825,7 +845,7 @@ void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
}
}
skb_hwtstamps(skb)->hwtstamp =
- ktime_add_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
+ ktime_sub_ns(skb_hwtstamps(skb)->hwtstamp, adjust);
/* Update the last_rx_timestamp timer in order to enable watchdog check
* for error case of latched timestamp on a dropped packet.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index b4217f3..c47b605 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -2958,8 +2958,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
}
/* was that the last pool using this rar? */
- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
+ if (mpsar_lo == 0 && mpsar_hi == 0 &&
+ rar != 0 && rar != hw->mac.san_mac_rar_index)
hw->mac.ops.clear_rar(hw, rar);
+
return 0;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 5418c69a..b4f0374 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -4100,6 +4100,8 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 vlnctrl, i;
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
@@ -4112,8 +4114,7 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
/* fall through */
case ixgbe_mac_82598EB:
/* legacy case, we can just disable VLAN filtering */
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
+ vlnctrl &= ~IXGBE_VLNCTRL_VFE;
IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
return;
}
@@ -4125,6 +4126,10 @@ static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter)
/* Set flag so we don't redo unnecessary work */
adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC;
+ /* For VMDq and SR-IOV we must leave VLAN filtering enabled */
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
/* Add PF to all active pools */
for (i = IXGBE_VLVF_ENTRIES; --i;) {
u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32);
@@ -4191,6 +4196,11 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
struct ixgbe_hw *hw = &adapter->hw;
u32 vlnctrl, i;
+ /* Set VLAN filtering to enabled */
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
+
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
@@ -4202,10 +4212,6 @@ static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter)
break;
/* fall through */
case ixgbe_mac_82598EB:
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
return;
}
@@ -8390,12 +8396,14 @@ static int parse_tc_actions(struct ixgbe_adapter *adapter,
struct tcf_exts *exts, u64 *action, u8 *queue)
{
const struct tc_action *a;
+ LIST_HEAD(actions);
int err;
if (tc_no_actions(exts))
return -EINVAL;
- tc_for_each_action(a, exts) {
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
/* Drop action */
if (is_tcf_gact_shot(a)) {
@@ -9517,6 +9525,7 @@ skip_sriov:
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features |
+ NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_RXALL |
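This ixgbe hunk (and the matching mlx5 en_tc.c hunks further down) replaces tc_for_each_action() with tcf_exts_to_list() plus list_for_each_entry(). A self-contained sketch of the intrusive-list walk the new loop relies on, using demo types rather than the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    /* minimal intrusive list, modeled on the kernel's list_head */
    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_for_each_entry_demo(pos, head, type, member)          \
            for (pos = container_of((head)->next, type, member);       \
                 &pos->member != (head);                               \
                 pos = container_of(pos->member.next, type, member))

    struct tc_action_demo {
            const char *kind;
            struct list_head list;
    };

    int main(void)
    {
            struct list_head actions = { &actions, &actions };
            struct tc_action_demo drop = { "drop", { NULL, NULL } };
            struct tc_action_demo *a;

            /* splice one action in, as tcf_exts_to_list() would */
            drop.list.next = actions.next;
            drop.list.prev = &actions;
            actions.next->prev = &drop.list;
            actions.next = &drop.list;

            list_for_each_entry_demo(a, &actions, struct tc_action_demo, list)
                    printf("action: %s\n", a->kind);
            return 0;
    }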
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index b57ae3a..3743af8 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -50,6 +50,10 @@ static const struct mtk_ethtool_stats {
MTK_ETHTOOL_STAT(rx_flow_control_packets),
};
+static const char * const mtk_clks_source_name[] = {
+ "ethif", "esw", "gp1", "gp2"
+};
+
void mtk_w32(struct mtk_eth *eth, u32 val, unsigned reg)
{
__raw_writel(val, eth->base + reg);
@@ -245,12 +249,16 @@ static int mtk_phy_connect(struct mtk_mac *mac)
case PHY_INTERFACE_MODE_MII:
ge_mode = 1;
break;
- case PHY_INTERFACE_MODE_RMII:
+ case PHY_INTERFACE_MODE_REVMII:
ge_mode = 2;
break;
+ case PHY_INTERFACE_MODE_RMII:
+ if (!mac->id)
+ goto err_phy;
+ ge_mode = 3;
+ break;
default:
- dev_err(eth->dev, "invalid phy_mode\n");
- return -1;
+ goto err_phy;
}
/* put the gmac into the right mode */
@@ -263,19 +271,31 @@ static int mtk_phy_connect(struct mtk_mac *mac)
mac->phy_dev->autoneg = AUTONEG_ENABLE;
mac->phy_dev->speed = 0;
mac->phy_dev->duplex = 0;
+
+ if (of_phy_is_fixed_link(mac->of_node))
+ mac->phy_dev->supported |=
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+
mac->phy_dev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
SUPPORTED_Asym_Pause;
mac->phy_dev->advertising = mac->phy_dev->supported |
ADVERTISED_Autoneg;
phy_start_aneg(mac->phy_dev);
+ of_node_put(np);
+
return 0;
+
+err_phy:
+ of_node_put(np);
+ dev_err(eth->dev, "invalid phy_mode\n");
+ return -EINVAL;
}
static int mtk_mdio_init(struct mtk_eth *eth)
{
struct device_node *mii_np;
- int err;
+ int ret;
mii_np = of_get_child_by_name(eth->dev->of_node, "mdio-bus");
if (!mii_np) {
@@ -284,13 +304,13 @@ static int mtk_mdio_init(struct mtk_eth *eth)
}
if (!of_device_is_available(mii_np)) {
- err = 0;
+ ret = -ENODEV;
goto err_put_node;
}
- eth->mii_bus = mdiobus_alloc();
+ eth->mii_bus = devm_mdiobus_alloc(eth->dev);
if (!eth->mii_bus) {
- err = -ENOMEM;
+ ret = -ENOMEM;
goto err_put_node;
}
@@ -301,19 +321,11 @@ static int mtk_mdio_init(struct mtk_eth *eth)
eth->mii_bus->parent = eth->dev;
snprintf(eth->mii_bus->id, MII_BUS_ID_SIZE, "%s", mii_np->name);
- err = of_mdiobus_register(eth->mii_bus, mii_np);
- if (err)
- goto err_free_bus;
-
- return 0;
-
-err_free_bus:
- mdiobus_free(eth->mii_bus);
+ ret = of_mdiobus_register(eth->mii_bus, mii_np);
err_put_node:
of_node_put(mii_np);
- eth->mii_bus = NULL;
- return err;
+ return ret;
}
static void mtk_mdio_cleanup(struct mtk_eth *eth)
@@ -322,8 +334,6 @@ static void mtk_mdio_cleanup(struct mtk_eth *eth)
return;
mdiobus_unregister(eth->mii_bus);
- of_node_put(eth->mii_bus->dev.of_node);
- mdiobus_free(eth->mii_bus);
}
static inline void mtk_irq_disable(struct mtk_eth *eth, u32 mask)
@@ -542,15 +552,15 @@ static inline struct mtk_tx_buf *mtk_desc_to_tx_buf(struct mtk_tx_ring *ring,
return &ring->buf[idx];
}
-static void mtk_tx_unmap(struct device *dev, struct mtk_tx_buf *tx_buf)
+static void mtk_tx_unmap(struct mtk_eth *eth, struct mtk_tx_buf *tx_buf)
{
if (tx_buf->flags & MTK_TX_FLAGS_SINGLE0) {
- dma_unmap_single(dev,
+ dma_unmap_single(eth->dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
} else if (tx_buf->flags & MTK_TX_FLAGS_PAGE0) {
- dma_unmap_page(dev,
+ dma_unmap_page(eth->dev,
dma_unmap_addr(tx_buf, dma_addr0),
dma_unmap_len(tx_buf, dma_len0),
DMA_TO_DEVICE);
@@ -572,14 +582,15 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
dma_addr_t mapped_addr;
unsigned int nr_frags;
int i, n_desc = 1;
- u32 txd4 = 0;
+ u32 txd4 = 0, fport;
itxd = ring->next_free;
if (itxd == ring->last_free)
return -ENOMEM;
/* set the forward port */
- txd4 |= (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+ fport = (mac->id + 1) << TX_DMA_FPORT_SHIFT;
+ txd4 |= fport;
tx_buf = mtk_desc_to_tx_buf(ring, itxd);
memset(tx_buf, 0, sizeof(*tx_buf));
@@ -595,9 +606,9 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
if (skb_vlan_tag_present(skb))
txd4 |= TX_DMA_INS_VLAN | skb_vlan_tag_get(skb);
- mapped_addr = dma_map_single(&dev->dev, skb->data,
+ mapped_addr = dma_map_single(eth->dev, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
return -ENOMEM;
WRITE_ONCE(itxd->txd1, mapped_addr);
@@ -623,10 +634,10 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
n_desc++;
frag_map_size = min(frag_size, MTK_TX_DMA_BUF_LEN);
- mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset,
+ mapped_addr = skb_frag_dma_map(eth->dev, frag, offset,
frag_map_size,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(&dev->dev, mapped_addr)))
+ if (unlikely(dma_mapping_error(eth->dev, mapped_addr)))
goto err_dma;
if (i == nr_frags - 1 &&
@@ -637,7 +648,7 @@ static int mtk_tx_map(struct sk_buff *skb, struct net_device *dev,
WRITE_ONCE(txd->txd3, (TX_DMA_SWC |
TX_DMA_PLEN0(frag_map_size) |
last_frag * TX_DMA_LS0));
- WRITE_ONCE(txd->txd4, 0);
+ WRITE_ONCE(txd->txd4, fport);
tx_buf->skb = (struct sk_buff *)MTK_DMA_DUMMY_DESC;
tx_buf = mtk_desc_to_tx_buf(ring, txd);
@@ -679,7 +690,7 @@ err_dma:
tx_buf = mtk_desc_to_tx_buf(ring, itxd);
/* unmap dma */
- mtk_tx_unmap(&dev->dev, tx_buf);
+ mtk_tx_unmap(eth, tx_buf);
itxd->txd3 = TX_DMA_LS0 | TX_DMA_OWNER_CPU;
itxd = mtk_qdma_phys_to_virt(ring, itxd->txd2);
@@ -836,11 +847,11 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
netdev->stats.rx_dropped++;
goto release_desc;
}
- dma_addr = dma_map_single(&eth->netdev[mac]->dev,
+ dma_addr = dma_map_single(eth->dev,
new_data + NET_SKB_PAD,
ring->buf_size,
DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) {
+ if (unlikely(dma_mapping_error(eth->dev, dma_addr))) {
skb_free_frag(new_data);
netdev->stats.rx_dropped++;
goto release_desc;
@@ -849,13 +860,13 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
/* receive data */
skb = build_skb(data, ring->frag_size);
if (unlikely(!skb)) {
- put_page(virt_to_head_page(new_data));
+ skb_free_frag(new_data);
netdev->stats.rx_dropped++;
goto release_desc;
}
skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
- dma_unmap_single(&netdev->dev, trxd.rxd1,
+ dma_unmap_single(eth->dev, trxd.rxd1,
ring->buf_size, DMA_FROM_DEVICE);
pktlen = RX_DMA_GET_PLEN0(trxd.rxd2);
skb->dev = netdev;
@@ -937,7 +948,7 @@ static int mtk_poll_tx(struct mtk_eth *eth, int budget)
done[mac]++;
budget--;
}
- mtk_tx_unmap(eth->dev, tx_buf);
+ mtk_tx_unmap(eth, tx_buf);
ring->last_free = desc;
atomic_inc(&ring->free_count);
@@ -1092,7 +1103,7 @@ static void mtk_tx_clean(struct mtk_eth *eth)
if (ring->buf) {
for (i = 0; i < MTK_DMA_SIZE; i++)
- mtk_tx_unmap(eth->dev, &ring->buf[i]);
+ mtk_tx_unmap(eth, &ring->buf[i]);
kfree(ring->buf);
ring->buf = NULL;
}
@@ -1490,10 +1501,7 @@ static void mtk_uninit(struct net_device *dev)
struct mtk_eth *eth = mac->hw;
phy_disconnect(mac->phy_dev);
- mtk_mdio_cleanup(eth);
mtk_irq_disable(eth, ~0);
- free_irq(eth->irq[1], dev);
- free_irq(eth->irq[2], dev);
}
static int mtk_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
@@ -1751,6 +1759,7 @@ static int mtk_add_mac(struct mtk_eth *eth, struct device_node *np)
goto free_netdev;
}
spin_lock_init(&mac->hw_stats->stats_lock);
+ u64_stats_init(&mac->hw_stats->syncp);
mac->hw_stats->reg_offset = id * MTK_STAT_OFFSET;
SET_NETDEV_DEV(eth->netdev[id], eth->dev);
@@ -1796,6 +1805,7 @@ static int mtk_probe(struct platform_device *pdev)
if (!eth)
return -ENOMEM;
+ eth->dev = &pdev->dev;
eth->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(eth->base))
return PTR_ERR(eth->base);
@@ -1830,21 +1840,21 @@ static int mtk_probe(struct platform_device *pdev)
return -ENXIO;
}
}
+ for (i = 0; i < ARRAY_SIZE(eth->clks); i++) {
+ eth->clks[i] = devm_clk_get(eth->dev,
+ mtk_clks_source_name[i]);
+ if (IS_ERR(eth->clks[i])) {
+ if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ return -ENODEV;
+ }
+ }
- eth->clk_ethif = devm_clk_get(&pdev->dev, "ethif");
- eth->clk_esw = devm_clk_get(&pdev->dev, "esw");
- eth->clk_gp1 = devm_clk_get(&pdev->dev, "gp1");
- eth->clk_gp2 = devm_clk_get(&pdev->dev, "gp2");
- if (IS_ERR(eth->clk_esw) || IS_ERR(eth->clk_gp1) ||
- IS_ERR(eth->clk_gp2) || IS_ERR(eth->clk_ethif))
- return -ENODEV;
-
- clk_prepare_enable(eth->clk_ethif);
- clk_prepare_enable(eth->clk_esw);
- clk_prepare_enable(eth->clk_gp1);
- clk_prepare_enable(eth->clk_gp2);
+ clk_prepare_enable(eth->clks[MTK_CLK_ETHIF]);
+ clk_prepare_enable(eth->clks[MTK_CLK_ESW]);
+ clk_prepare_enable(eth->clks[MTK_CLK_GP1]);
+ clk_prepare_enable(eth->clks[MTK_CLK_GP2]);
- eth->dev = &pdev->dev;
eth->msg_enable = netif_msg_init(mtk_msg_level, MTK_DEFAULT_MSG_ENABLE);
INIT_WORK(&eth->pending_work, mtk_pending_work);
@@ -1886,15 +1896,24 @@ err_free_dev:
static int mtk_remove(struct platform_device *pdev)
{
struct mtk_eth *eth = platform_get_drvdata(pdev);
+ int i;
- clk_disable_unprepare(eth->clk_ethif);
- clk_disable_unprepare(eth->clk_esw);
- clk_disable_unprepare(eth->clk_gp1);
- clk_disable_unprepare(eth->clk_gp2);
+ /* stop all devices to make sure that dma is properly shut down */
+ for (i = 0; i < MTK_MAC_COUNT; i++) {
+ if (!eth->netdev[i])
+ continue;
+ mtk_stop(eth->netdev[i]);
+ }
+
+ clk_disable_unprepare(eth->clks[MTK_CLK_ETHIF]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_ESW]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_GP1]);
+ clk_disable_unprepare(eth->clks[MTK_CLK_GP2]);
netif_napi_del(&eth->tx_napi);
netif_napi_del(&eth->rx_napi);
mtk_cleanup(eth);
+ mtk_mdio_cleanup(eth);
platform_set_drvdata(pdev, NULL);
return 0;
@@ -1904,6 +1923,7 @@ const struct of_device_id of_mtk_match[] = {
{ .compatible = "mediatek,mt7623-eth" },
{},
};
+MODULE_DEVICE_TABLE(of, of_mtk_match);
static struct platform_driver mtk_driver = {
.probe = mtk_probe,
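The reworked mtk_mdio_init() above funnels success and failure through one exit that drops the device-node reference, and devm_mdiobus_alloc() removes the manual free. A small sketch of that single-exit pattern, with stand-in types (node, node_put) instead of the OF helpers:

    #include <stdio.h>

    struct node { int refs; };
    static void node_put(struct node *n) { n->refs--; }

    static int mdio_init_demo(struct node *bus_node, int available)
    {
            int ret;

            if (!bus_node)
                    return -1;      /* nothing to clean up yet */

            if (!available) {
                    ret = -19;      /* -ENODEV */
                    goto err_put_node;
            }

            ret = 0;                /* register succeeded */

    err_put_node:
            /* one exit path: the reference drops on success and failure */
            node_put(bus_node);
            return ret;
    }

    int main(void)
    {
            struct node n = { .refs = 1 };

            printf("ret=%d refs=%d\n", mdio_init_demo(&n, 1), n.refs);
            return 0;
    }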
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index f82e3ac..6e1ade7 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -290,6 +290,17 @@ enum mtk_tx_flags {
MTK_TX_FLAGS_PAGE0 = 0x02,
};
+/* This enum gives the order in which the clocks are stored in the
+ * clks array
+ */
+enum mtk_clks_map {
+ MTK_CLK_ETHIF,
+ MTK_CLK_ESW,
+ MTK_CLK_GP1,
+ MTK_CLK_GP2,
+ MTK_CLK_MAX
+};
+
/* struct mtk_tx_buf - This struct holds the pointers to the memory pointed at
* by the TX descriptors
* @skb: The SKB pointer of the packet being sent
@@ -370,10 +381,7 @@ struct mtk_rx_ring {
* @scratch_ring: Newer SoCs need memory for a second HW managed TX ring
* @phy_scratch_ring: physical address of scratch_ring
* @scratch_head: The scratch memory that scratch_ring points to.
- * @clk_ethif: The ethif clock
- * @clk_esw: The switch clock
- * @clk_gp1: The gmac1 clock
- * @clk_gp2: The gmac2 clock
+ * @clks: clock array for all clocks required
* @mii_bus: If there is a bus we need to create an instance for it
* @pending_work: The workqueue used to reset the dma ring
*/
@@ -400,10 +408,8 @@ struct mtk_eth {
struct mtk_tx_dma *scratch_ring;
dma_addr_t phy_scratch_ring;
void *scratch_head;
- struct clk *clk_ethif;
- struct clk *clk_esw;
- struct clk *clk_gp1;
- struct clk *clk_gp2;
+ struct clk *clks[MTK_CLK_MAX];
+
struct mii_bus *mii_bus;
struct work_struct pending_work;
};
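The mtk_clks_map enum above indexes the new clks[] array in the same order as the mtk_clks_source_name[] table in the .c file, so four near-identical clk_get calls collapse into one loop. A toy version of the enum-plus-name-table pairing:

    #include <stdio.h>

    enum demo_clks { CLK_ETHIF, CLK_ESW, CLK_GP1, CLK_GP2, CLK_MAX };

    static const char * const demo_clk_names[CLK_MAX] = {
            "ethif", "esw", "gp1", "gp2",
    };

    int main(void)
    {
            const char *clks[CLK_MAX];
            int i;

            /* one loop replaces four hand-written lookups */
            for (i = 0; i < CLK_MAX; i++) {
                    clks[i] = demo_clk_names[i]; /* pretend clk handle */
                    printf("request clock '%s'\n", clks[i]);
            }
            printf("enable '%s' via index\n", clks[CLK_ETHIF]);
            return 0;
    }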
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index 99c6bbd..b04760a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -94,7 +94,7 @@ static u8 mlx4_en_dcbnl_getcap(struct net_device *dev, int capid, u8 *cap)
*cap = true;
break;
case DCB_CAP_ATTR_DCBX:
- *cap = priv->cee_params.dcbx_cap;
+ *cap = priv->dcbx_cap;
break;
case DCB_CAP_ATTR_PFC_TCS:
*cap = 1 << mlx4_max_tc(priv->mdev->dev);
@@ -111,14 +111,14 @@ static u8 mlx4_en_dcbnl_getpfcstate(struct net_device *netdev)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
- return priv->cee_params.dcb_cfg.pfc_state;
+ return priv->cee_config.pfc_state;
}
static void mlx4_en_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
- priv->cee_params.dcb_cfg.pfc_state = state;
+ priv->cee_config.pfc_state = state;
}
static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
@@ -126,7 +126,7 @@ static void mlx4_en_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
- *setting = priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc;
+ *setting = priv->cee_config.dcb_pfc[priority];
}
static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
@@ -134,8 +134,8 @@ static void mlx4_en_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
- priv->cee_params.dcb_cfg.tc_config[priority].dcb_pfc = setting;
- priv->cee_params.dcb_cfg.pfc_state = true;
+ priv->cee_config.dcb_pfc[priority] = setting;
+ priv->cee_config.pfc_state = true;
}
static int mlx4_en_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
@@ -157,13 +157,11 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
{
struct mlx4_en_priv *priv = netdev_priv(netdev);
struct mlx4_en_dev *mdev = priv->mdev;
- struct mlx4_en_cee_config *dcb_cfg = &priv->cee_params.dcb_cfg;
- int err = 0;
- if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
- return -EINVAL;
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ return 1;
- if (dcb_cfg->pfc_state) {
+ if (priv->cee_config.pfc_state) {
int tc;
priv->prof->rx_pause = 0;
@@ -171,7 +169,7 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
for (tc = 0; tc < CEE_DCBX_MAX_PRIO; tc++) {
u8 tc_mask = 1 << tc;
- switch (dcb_cfg->tc_config[tc].dcb_pfc) {
+ switch (priv->cee_config.dcb_pfc[tc]) {
case pfc_disabled:
priv->prof->tx_ppp &= ~tc_mask;
priv->prof->rx_ppp &= ~tc_mask;
@@ -199,15 +197,17 @@ static u8 mlx4_en_dcbnl_set_all(struct net_device *netdev)
en_dbg(DRV, priv, "Set pfc off\n");
}
- err = mlx4_SET_PORT_general(mdev->dev, priv->port,
- priv->rx_skb_size + ETH_FCS_LEN,
- priv->prof->tx_pause,
- priv->prof->tx_ppp,
- priv->prof->rx_pause,
- priv->prof->rx_ppp);
- if (err)
+ if (mlx4_SET_PORT_general(mdev->dev, priv->port,
+ priv->rx_skb_size + ETH_FCS_LEN,
+ priv->prof->tx_pause,
+ priv->prof->tx_ppp,
+ priv->prof->rx_pause,
+ priv->prof->rx_ppp)) {
en_err(priv, "Failed setting pause params\n");
- return err;
+ return 1;
+ }
+
+ return 0;
}
static u8 mlx4_en_dcbnl_get_state(struct net_device *dev)
@@ -225,7 +225,7 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
struct mlx4_en_priv *priv = netdev_priv(dev);
int num_tcs = 0;
- if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 1;
if (!!(state) == !!(priv->flags & MLX4_EN_FLAG_DCB_ENABLED))
@@ -238,7 +238,10 @@ static u8 mlx4_en_dcbnl_set_state(struct net_device *dev, u8 state)
priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
}
- return mlx4_en_setup_tc(dev, num_tcs);
+ if (mlx4_en_setup_tc(dev, num_tcs))
+ return 1;
+
+ return 0;
}
/* On success returns a non-zero 802.1p user priority bitmap
@@ -252,7 +255,7 @@ static int mlx4_en_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
.selector = idtype,
.protocol = id,
};
- if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return 0;
return dcb_getapp(netdev, &app);
@@ -264,7 +267,7 @@ static int mlx4_en_dcbnl_setapp(struct net_device *netdev, u8 idtype,
struct mlx4_en_priv *priv = netdev_priv(netdev);
struct dcb_app app;
- if (!(priv->cee_params.dcbx_cap & DCB_CAP_DCBX_VER_CEE))
+ if (!(priv->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
return -EINVAL;
memset(&app, 0, sizeof(struct dcb_app));
@@ -433,7 +436,7 @@ static u8 mlx4_en_dcbnl_getdcbx(struct net_device *dev)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- return priv->cee_params.dcbx_cap;
+ return priv->dcbx_cap;
}
static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
@@ -442,7 +445,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
struct ieee_ets ets = {0};
struct ieee_pfc pfc = {0};
- if (mode == priv->cee_params.dcbx_cap)
+ if (mode == priv->dcbx_cap)
return 0;
if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
@@ -451,7 +454,7 @@ static u8 mlx4_en_dcbnl_setdcbx(struct net_device *dev, u8 mode)
!(mode & DCB_CAP_DCBX_HOST))
goto err;
- priv->cee_params.dcbx_cap = mode;
+ priv->dcbx_cap = mode;
ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
pfc.pfc_cap = IEEE_8021QAZ_MAX_TCS;
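Several en_dcb_nl.c hunks above change returns from -EINVAL or err to 1. The CEE dcbnl callbacks return a u8 status, so a negative errno would be truncated into a meaningless positive value; the convention is 0 for success, 1 for failure. A small demonstration of the truncation hazard:

    #include <stdio.h>

    /* dcbnl CEE ops return u8: 0 = success, nonzero = failure */
    static unsigned char set_all_bad(void)  { return -22; /* -EINVAL */ }
    static unsigned char set_all_good(void) { return 1;   /* plain failure */ }

    int main(void)
    {
            /* -22 truncated to u8 becomes 234, not a recognizable errno */
            printf("errno-style: %u\n", set_all_bad());
            printf("status-style: %u\n", set_all_good());
            return 0;
    }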
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 4198e9b..fedb829 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -71,10 +71,11 @@ int mlx4_en_setup_tc(struct net_device *dev, u8 up)
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
if (up) {
- priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
+ if (priv->dcbx_cap)
+ priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
} else {
priv->flags &= ~MLX4_EN_FLAG_DCB_ENABLED;
- priv->cee_params.dcb_cfg.pfc_state = false;
+ priv->cee_config.pfc_state = false;
}
}
#endif /* CONFIG_MLX4_EN_DCB */
@@ -3048,9 +3049,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
struct mlx4_en_priv *priv;
int i;
int err;
-#ifdef CONFIG_MLX4_EN_DCB
- struct tc_configuration *tc;
-#endif
dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
MAX_TX_RINGS, MAX_RX_RINGS);
@@ -3117,16 +3115,13 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
priv->msg_enable = MLX4_EN_MSG_LEVEL;
#ifdef CONFIG_MLX4_EN_DCB
if (!mlx4_is_slave(priv->mdev->dev)) {
- priv->cee_params.dcbx_cap = DCB_CAP_DCBX_VER_CEE |
- DCB_CAP_DCBX_HOST |
- DCB_CAP_DCBX_VER_IEEE;
+ priv->dcbx_cap = DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_HOST |
+ DCB_CAP_DCBX_VER_IEEE;
priv->flags |= MLX4_EN_DCB_ENABLED;
- priv->cee_params.dcb_cfg.pfc_state = false;
+ priv->cee_config.pfc_state = false;
- for (i = 0; i < MLX4_EN_NUM_UP; i++) {
- tc = &priv->cee_params.dcb_cfg.tc_config[i];
- tc->dcb_pfc = pfc_disabled;
- }
+ for (i = 0; i < MLX4_EN_NUM_UP; i++)
+ priv->cee_config.dcb_pfc[i] = pfc_disabled;
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 9df87ca..e2509bb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -818,7 +818,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
real_size = get_real_size(skb, shinfo, dev, &lso_header_size,
&inline_ok, &fragptr);
if (unlikely(!real_size))
- goto tx_drop;
+ goto tx_drop_count;
/* Align descriptor to TXBB size */
desc_size = ALIGN(real_size, TXBB_SIZE);
@@ -826,7 +826,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(nr_txbb > MAX_DESC_TXBBS)) {
if (netif_msg_tx_err(priv))
en_warn(priv, "Oversized header or SG list\n");
- goto tx_drop;
+ goto tx_drop_count;
}
bf_ok = ring->bf_enabled;
@@ -1071,9 +1071,10 @@ tx_drop_unmap:
PCI_DMA_TODEVICE);
}
+tx_drop_count:
+ ring->tx_dropped++;
tx_drop:
dev_kfree_skb_any(skb);
- ring->tx_dropped++;
return NETDEV_TX_OK;
}
@@ -1106,7 +1107,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
goto tx_drop;
if (mlx4_en_is_tx_ring_full(ring))
- goto tx_drop;
+ goto tx_drop_count;
/* fetch ring->cons far ahead before needing it to avoid stall */
ring_cons = READ_ONCE(ring->cons);
@@ -1176,7 +1177,8 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_alloc *frame,
return NETDEV_TX_OK;
-tx_drop:
+tx_drop_count:
ring->tx_dropped++;
+tx_drop:
return NETDEV_TX_BUSY;
}
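The en_tx.c relabeling makes tx_dropped count only genuine drops: in mlx4_en_xmit() the tx_drop label still frees the skb, and in mlx4_en_xmit_frame() a full ring is counted while other early exits are not. A sketch of the two-label layering:

    #include <stdio.h>

    static long tx_dropped;

    static int xmit_demo(int ring_full, int bad_frame)
    {
            if (bad_frame)
                    goto tx_drop;           /* early exit: no count */
            if (ring_full)
                    goto tx_drop_count;     /* genuinely dropped: count it */

            return 0;                       /* NETDEV_TX_OK */

    tx_drop_count:
            tx_dropped++;
    tx_drop:                                /* falls through from the counter */
            return 1;                       /* NETDEV_TX_BUSY */
    }

    int main(void)
    {
            xmit_demo(1, 0);
            xmit_demo(0, 1);
            printf("tx_dropped=%ld (only the ring-full case counted)\n",
                   tx_dropped);
            return 0;
    }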
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index f613977..cf8f8a7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1305,8 +1305,8 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
return 0;
err_out_unmap:
- while (i >= 0)
- mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+ while (i > 0)
+ mlx4_free_eq(dev, &priv->eq_table.eq[--i]);
#ifdef CONFIG_RFS_ACCEL
for (i = 1; i <= dev->caps.num_ports; i++) {
if (mlx4_priv(dev)->port[i].rmap) {
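The eq.c fix above is the classic unwind off-by-one: on error, i is the index of the entry that failed to initialize, so cleanup must free exactly the entries [0, i), as in while (i > 0) free(--i), rather than also touching index i or running past index 0. A standalone check:

    #include <assert.h>
    #include <stdio.h>

    #define N 4

    int main(void)
    {
            int initialized[N] = { 1, 1, 0, 0 }; /* init failed at i == 2 */
            int i = 2;

            /* unwind exactly the entries that were set up: indices [0, i) */
            while (i > 0) {
                    --i;
                    assert(initialized[i]); /* old `while (i >= 0) ... i--` */
                    initialized[i] = 0;     /* would trip this on index 2 */
            }
            printf("unwound cleanly\n");
            return 0;
    }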
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 75dd2e3..7183ac4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2970,6 +2970,7 @@ static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
mlx4_err(dev, "Failed to create mtu file for port %d\n", port);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_attr);
+ devlink_port_unregister(&info->devlink_port);
info->port = -1;
}
@@ -2984,6 +2985,8 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
+ devlink_port_unregister(&info->devlink_port);
+
#ifdef CONFIG_RFS_ACCEL
free_irq_cpu_rmap(info->rmap);
info->rmap = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 2c2913d..9099dbd 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -482,20 +482,10 @@ enum dcb_pfc_type {
pfc_enabled_rx
};
-struct tc_configuration {
- enum dcb_pfc_type dcb_pfc;
-};
-
struct mlx4_en_cee_config {
bool pfc_state;
- struct tc_configuration tc_config[MLX4_EN_NUM_UP];
+ enum dcb_pfc_type dcb_pfc[MLX4_EN_NUM_UP];
};
-
-struct mlx4_en_cee_params {
- u8 dcbx_cap;
- struct mlx4_en_cee_config dcb_cfg;
-};
-
#endif
struct ethtool_flow_id {
@@ -624,7 +614,8 @@ struct mlx4_en_priv {
struct ieee_ets ets;
u16 maxrate[IEEE_8021QAZ_MAX_TCS];
enum dcbnl_cndd_states cndd_state[IEEE_8021QAZ_MAX_TCS];
- struct mlx4_en_cee_params cee_params;
+ struct mlx4_en_cee_config cee_config;
+ u8 dcbx_cap;
#endif
#ifdef CONFIG_RFS_ACCEL
spinlock_t filters_lock;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 3d2095e..c5b2064 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -52,7 +52,7 @@
#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
#define MLX4_IGNORE_FCS_MASK 0x1
-#define MLNX4_TX_MAX_NUMBER 8
+#define MLX4_TC_MAX_NUMBER 8
void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
{
@@ -2022,7 +2022,7 @@ int mlx4_max_tc(struct mlx4_dev *dev)
u8 num_tc = dev->caps.max_tc_eth;
if (!num_tc)
- num_tc = MLNX4_TX_MAX_NUMBER;
+ num_tc = MLX4_TC_MAX_NUMBER;
return num_tc;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index d6e2a1c..c2ec01a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -143,13 +143,14 @@ static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
return cmd->cmd_buf + (idx << cmd->log_stride);
}
-static u8 xor8_buf(void *buf, int len)
+static u8 xor8_buf(void *buf, size_t offset, int len)
{
u8 *ptr = buf;
u8 sum = 0;
int i;
+ int end = len + offset;
- for (i = 0; i < len; i++)
+ for (i = offset; i < end; i++)
sum ^= ptr[i];
return sum;
@@ -157,41 +158,49 @@ static u8 xor8_buf(void *buf, int len)
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
- if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
+ size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+ int xor_len = sizeof(*block) - sizeof(block->data) - 1;
+
+ if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
return -EINVAL;
- if (xor8_buf(block, sizeof(*block)) != 0xff)
+ if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
return -EINVAL;
return 0;
}
-static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
- int csum)
+static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
- block->token = token;
- if (csum) {
- block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
- sizeof(block->data) - 2);
- block->sig = ~xor8_buf(block, sizeof(*block) - 1);
- }
+ int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
+ size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
+
+ block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
+ block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}
-static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
+static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
struct mlx5_cmd_mailbox *next = msg->next;
-
- while (next) {
- calc_block_sig(next->buf, token, csum);
+ int size = msg->len;
+ int blen = size - min_t(int, sizeof(msg->first.data), size);
+ int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+ / MLX5_CMD_DATA_BLOCK_SIZE;
+ int i = 0;
+
+ for (i = 0; i < n && next; i++) {
+ calc_block_sig(next->buf);
next = next->next;
}
}
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
- ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
- calc_chain_sig(ent->in, ent->token, csum);
- calc_chain_sig(ent->out, ent->token, csum);
+ ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
+ if (csum) {
+ calc_chain_sig(ent->in);
+ calc_chain_sig(ent->out);
+ }
}
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
@@ -222,12 +231,17 @@ static int verify_signature(struct mlx5_cmd_work_ent *ent)
struct mlx5_cmd_mailbox *next = ent->out->next;
int err;
u8 sig;
+ int size = ent->out->len;
+ int blen = size - min_t(int, sizeof(ent->out->first.data), size);
+ int n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1)
+ / MLX5_CMD_DATA_BLOCK_SIZE;
+ int i = 0;
- sig = xor8_buf(ent->lay, sizeof(*ent->lay));
+ sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
if (sig != 0xff)
return -EINVAL;
- while (next) {
+ for (i = 0; i < n && next; i++) {
err = verify_block_sig(next->buf);
if (err)
return err;
@@ -656,7 +670,6 @@ static void cmd_work_handler(struct work_struct *work)
spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
- ent->token = alloc_token(cmd);
cmd->ent_arr[ent->idx] = ent;
lay = get_inst(cmd, ent->idx);
ent->lay = lay;
@@ -766,7 +779,8 @@ static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out, void *uout, int uout_size,
mlx5_cmd_cbk_t callback,
- void *context, int page_queue, u8 *status)
+ void *context, int page_queue, u8 *status,
+ u8 token)
{
struct mlx5_cmd *cmd = &dev->cmd;
struct mlx5_cmd_work_ent *ent;
@@ -783,6 +797,8 @@ static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
if (IS_ERR(ent))
return PTR_ERR(ent);
+ ent->token = token;
+
if (!callback)
init_completion(&ent->done);
@@ -854,7 +870,8 @@ static const struct file_operations fops = {
.write = dbg_write,
};
-static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
+static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
+ u8 token)
{
struct mlx5_cmd_prot_block *block;
struct mlx5_cmd_mailbox *next;
@@ -880,6 +897,7 @@ static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
memcpy(block->data, from, copy);
from += copy;
size -= copy;
+ block->token = token;
next = next->next;
}
@@ -949,7 +967,8 @@ static void free_cmd_box(struct mlx5_core_dev *dev,
}
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
- gfp_t flags, int size)
+ gfp_t flags, int size,
+ u8 token)
{
struct mlx5_cmd_mailbox *tmp, *head = NULL;
struct mlx5_cmd_prot_block *block;
@@ -978,6 +997,7 @@ static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
tmp->next = head;
block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
block->block_num = cpu_to_be32(n - i - 1);
+ block->token = token;
head = tmp;
}
msg->next = head;
@@ -1352,7 +1372,7 @@ static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
}
if (IS_ERR(msg))
- msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);
+ msg = mlx5_alloc_cmd_msg(dev, gfp, in_size, 0);
return msg;
}
@@ -1377,6 +1397,7 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
int err;
u8 status = 0;
u32 drv_synd;
+ u8 token;
if (pci_channel_offline(dev->pdev) ||
dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
@@ -1395,20 +1416,22 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
return err;
}
- err = mlx5_copy_to_msg(inb, in, in_size);
+ token = alloc_token(&dev->cmd);
+
+ err = mlx5_copy_to_msg(inb, in, in_size, token);
if (err) {
mlx5_core_warn(dev, "err %d\n", err);
goto out_in;
}
- outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
+ outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
if (IS_ERR(outb)) {
err = PTR_ERR(outb);
goto out_in;
}
err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
- pages_queue, &status);
+ pages_queue, &status, token);
if (err)
goto out_out;
@@ -1476,7 +1499,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
INIT_LIST_HEAD(&cmd->cache.med.head);
for (i = 0; i < NUM_LONG_LISTS; i++) {
- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
+ msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE, 0);
if (IS_ERR(msg)) {
err = PTR_ERR(msg);
goto ex_err;
@@ -1486,7 +1509,7 @@ static int create_msg_cache(struct mlx5_core_dev *dev)
}
for (i = 0; i < NUM_MED_LISTS; i++) {
- msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
+ msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE, 0);
if (IS_ERR(msg)) {
err = PTR_ERR(msg);
goto ex_err;
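The cmd.c hunks recompute the xor8 block signatures over explicit (offset, length) windows so the token byte can be written before the checksum is taken. A standalone version of the helper plus a check that a buffer whose signature byte is the complement of the rest xors to 0xff:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint8_t xor8_buf(const void *buf, size_t offset, size_t len)
    {
            const uint8_t *p = buf;
            uint8_t sum = 0;
            size_t i;

            for (i = offset; i < offset + len; i++)
                    sum ^= p[i];
            return sum;
    }

    int main(void)
    {
            uint8_t block[8] = { 1, 2, 3, 4, 5, 6, 7, 0 };

            /* sign: last byte is the complement of the xor of the rest */
            block[7] = ~xor8_buf(block, 0, sizeof(block) - 1);
            /* verify: xor over the whole block must come out 0xff */
            assert(xor8_buf(block, 0, sizeof(block)) == 0xff);
            printf("sig ok\n");
            return 0;
    }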
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 1b495ef..bf722aa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -73,8 +73,12 @@
#define MLX5_MPWRQ_PAGES_PER_WQE BIT(MLX5_MPWRQ_WQE_PAGE_ORDER)
#define MLX5_MPWRQ_STRIDES_PER_PAGE (MLX5_MPWRQ_NUM_STRIDES >> \
MLX5_MPWRQ_WQE_PAGE_ORDER)
-#define MLX5_CHANNEL_MAX_NUM_MTTS (ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8) * \
- BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW))
+
+#define MLX5_MTT_OCTW(npages) (ALIGN(npages, 8) / 2)
+#define MLX5E_REQUIRED_MTTS(rqs, wqes)\
+ ((rqs) * (wqes) * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8))
+#define MLX5E_VALID_NUM_MTTS(num_mtts) (MLX5_MTT_OCTW(num_mtts) <= U16_MAX)
+
#define MLX5_UMR_ALIGN (2048)
#define MLX5_MPWRQ_SMALL_PACKET_THRESHOLD (128)
@@ -219,9 +223,8 @@ struct mlx5e_tstamp {
};
enum {
- MLX5E_RQ_STATE_POST_WQES_ENABLE,
+ MLX5E_RQ_STATE_FLUSH,
MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS,
- MLX5E_RQ_STATE_FLUSH_TIMEOUT,
MLX5E_RQ_STATE_AM,
};
@@ -304,6 +307,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
+ u32 mpwqe_mtt_offset;
struct mlx5e_rx_am am; /* Adaptive Moderation */
@@ -365,9 +369,8 @@ struct mlx5e_sq_dma {
};
enum {
- MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+ MLX5E_SQ_STATE_FLUSH,
MLX5E_SQ_STATE_BF_ENABLE,
- MLX5E_SQ_STATE_TX_TIMEOUT,
};
struct mlx5e_ico_wqe_info {
@@ -698,7 +701,6 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget);
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget);
int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
void mlx5e_free_tx_descs(struct mlx5e_sq *sq);
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq);
void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
@@ -814,11 +816,6 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
MLX5E_MAX_NUM_CHANNELS);
}
-static inline int mlx5e_get_mtt_octw(int npages)
-{
- return ALIGN(npages, 8) / 2;
-}
-
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
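The en.h change replaces the per-channel MTT constant with macros that size the UMR MTT region from (rings x WQEs) and validate that the octword count still fits in 16 bits. The arithmetic, reproduced standalone (the PAGES_PER_WQE value here is illustrative, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN_UP(x, a)     (((x) + (a) - 1) / (a) * (a))
    #define PAGES_PER_WQE      16          /* illustrative value */
    #define MTT_OCTW(npages)   (ALIGN_UP(npages, 8) / 2)
    #define REQUIRED_MTTS(rqs, wqes) \
            ((rqs) * (wqes) * ALIGN_UP(PAGES_PER_WQE, 8))
    #define VALID_NUM_MTTS(n)  (MTT_OCTW(n) <= UINT16_MAX)

    int main(void)
    {
            uint64_t mtts = REQUIRED_MTTS(8, 1 << 10); /* 8 rings, 1K WQEs */

            printf("mtts=%llu octwords=%llu valid=%d\n",
                   (unsigned long long)mtts,
                   (unsigned long long)MTT_OCTW(mtts),
                   (int)VALID_NUM_MTTS(mtts));
            return 0;
    }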
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
index 673043c..9cce153 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c
@@ -139,7 +139,7 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
struct mlx5e_tir *tir;
void *in;
int inlen;
- int err;
+ int err = 0;
inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
in = mlx5_vzalloc(inlen);
@@ -151,10 +151,11 @@ int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5_core_dev *mdev)
list_for_each_entry(tir, &mdev->mlx5e_res.td.tirs_list, list) {
err = mlx5_core_modify_tir(mdev, tir->tirn, in, inlen);
if (err)
- return err;
+ goto out;
}
+out:
kvfree(in);
- return 0;
+ return err;
}
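The en_common.c hunk is a leak fix: the early `return err` inside the loop skipped kvfree(in). The usual shape, sketched standalone, where one exit label owns the free:

    #include <stdio.h>
    #include <stdlib.h>

    static int refresh_demo(int fail_at)
    {
            int err = 0;
            int i;
            char *in = malloc(64);

            if (!in)
                    return -12;             /* -ENOMEM */

            for (i = 0; i < 4; i++) {
                    if (i == fail_at) {
                            err = -5;       /* -EIO */
                            goto out;       /* not `return err` */
                    }
            }
    out:
            free(in);                       /* reached on every path */
            return err;
    }

    int main(void)
    {
            printf("ok=%d fail=%d\n", refresh_demo(-1), refresh_demo(2));
            return 0;
    }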
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index caa9a3c..762af16 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -127,29 +127,40 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
return mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
}
-static int mlx5e_dbcnl_validate_ets(struct ieee_ets *ets)
+static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
+ struct ieee_ets *ets)
{
int bw_sum = 0;
int i;
/* Validate Priority */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY)
+ if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
+ netdev_err(netdev,
+ "Failed to validate ETS: priority value greater than max(%d)\n",
+ MLX5E_MAX_PRIORITY);
return -EINVAL;
+ }
}
/* Validate Bandwidth Sum */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
- if (!ets->tc_tx_bw[i])
+ if (!ets->tc_tx_bw[i]) {
+ netdev_err(netdev,
+ "Failed to validate ETS: BW 0 is illegal\n");
return -EINVAL;
+ }
bw_sum += ets->tc_tx_bw[i];
}
}
- if (bw_sum != 0 && bw_sum != 100)
+ if (bw_sum != 0 && bw_sum != 100) {
+ netdev_err(netdev,
+ "Failed to validate ETS: BW sum is illegal\n");
return -EINVAL;
+ }
return 0;
}
@@ -159,7 +170,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
struct mlx5e_priv *priv = netdev_priv(netdev);
int err;
- err = mlx5e_dbcnl_validate_ets(ets);
+ err = mlx5e_dbcnl_validate_ets(netdev, ets);
if (err)
return err;
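The validator above now reports which rule failed. The rules themselves: every priority must map to a valid TC, an ETS TC must not be given 0% bandwidth, and the bandwidths must sum to exactly 0 or 100. A standalone version of the bandwidth rules (the priority bound check is omitted for brevity):

    #include <stdio.h>

    #define MAX_TCS 8

    static int validate_ets(const int tc_tx_bw[MAX_TCS],
                            const int is_ets[MAX_TCS])
    {
            int bw_sum = 0;
            int i;

            for (i = 0; i < MAX_TCS; i++) {
                    if (is_ets[i]) {
                            if (!tc_tx_bw[i])
                                    return -1; /* 0% for an ETS TC is illegal */
                            bw_sum += tc_tx_bw[i];
                    }
            }
            return (bw_sum == 0 || bw_sum == 100) ? 0 : -1;
    }

    int main(void)
    {
            int bw[MAX_TCS]  = { 50, 50, 0, 0, 0, 0, 0, 0 };
            int ets[MAX_TCS] = {  1,  1, 0, 0, 0, 0, 0, 0 };

            printf("valid=%d\n", validate_ets(bw, ets) == 0);
            return 0;
    }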
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 4a3757e..7a346bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -331,7 +331,7 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
if (mlx5e_query_global_pause_combined(priv)) {
for (i = 0; i < NUM_PPORT_PER_PRIO_PFC_COUNTERS; i++) {
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[0],
- pport_per_prio_pfc_stats_desc, 0);
+ pport_per_prio_pfc_stats_desc, i);
}
}
@@ -352,15 +352,61 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
sq_stats_desc, j);
}
+static u32 mlx5e_rx_wqes_to_packets(struct mlx5e_priv *priv, int rq_wq_type,
+ int num_wqe)
+{
+ int packets_per_wqe;
+ int stride_size;
+ int num_strides;
+ int wqe_size;
+
+ if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ return num_wqe;
+
+ stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+ num_strides = 1 << priv->params.mpwqe_log_num_strides;
+ wqe_size = stride_size * num_strides;
+
+ packets_per_wqe = wqe_size /
+ ALIGN(ETH_DATA_LEN, stride_size);
+ return (1 << (order_base_2(num_wqe * packets_per_wqe) - 1));
+}
+
+static u32 mlx5e_packets_to_rx_wqes(struct mlx5e_priv *priv, int rq_wq_type,
+ int num_packets)
+{
+ int packets_per_wqe;
+ int stride_size;
+ int num_strides;
+ int wqe_size;
+ int num_wqes;
+
+ if (rq_wq_type != MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ return num_packets;
+
+ stride_size = 1 << priv->params.mpwqe_log_stride_sz;
+ num_strides = 1 << priv->params.mpwqe_log_num_strides;
+ wqe_size = stride_size * num_strides;
+
+ num_packets = (1 << order_base_2(num_packets));
+
+ packets_per_wqe = wqe_size /
+ ALIGN(ETH_DATA_LEN, stride_size);
+ num_wqes = DIV_ROUND_UP(num_packets, packets_per_wqe);
+ return 1 << (order_base_2(num_wqes));
+}
+
static void mlx5e_get_ringparam(struct net_device *dev,
struct ethtool_ringparam *param)
{
struct mlx5e_priv *priv = netdev_priv(dev);
int rq_wq_type = priv->params.rq_wq_type;
- param->rx_max_pending = 1 << mlx5_max_log_rq_size(rq_wq_type);
+ param->rx_max_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << mlx5_max_log_rq_size(rq_wq_type));
param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
- param->rx_pending = 1 << priv->params.log_rq_size;
+ param->rx_pending = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << priv->params.log_rq_size);
param->tx_pending = 1 << priv->params.log_sq_size;
}
@@ -370,9 +416,13 @@ static int mlx5e_set_ringparam(struct net_device *dev,
struct mlx5e_priv *priv = netdev_priv(dev);
bool was_opened;
int rq_wq_type = priv->params.rq_wq_type;
+ u32 rx_pending_wqes;
+ u32 min_rq_size;
+ u32 max_rq_size;
u16 min_rx_wqes;
u8 log_rq_size;
u8 log_sq_size;
+ u32 num_mtts;
int err = 0;
if (param->rx_jumbo_pending) {
@@ -385,18 +435,36 @@ static int mlx5e_set_ringparam(struct net_device *dev,
__func__);
return -EINVAL;
}
- if (param->rx_pending < (1 << mlx5_min_log_rq_size(rq_wq_type))) {
+
+ min_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << mlx5_min_log_rq_size(rq_wq_type));
+ max_rq_size = mlx5e_rx_wqes_to_packets(priv, rq_wq_type,
+ 1 << mlx5_max_log_rq_size(rq_wq_type));
+ rx_pending_wqes = mlx5e_packets_to_rx_wqes(priv, rq_wq_type,
+ param->rx_pending);
+
+ if (param->rx_pending < min_rq_size) {
netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
__func__, param->rx_pending,
- 1 << mlx5_min_log_rq_size(rq_wq_type));
+ min_rq_size);
return -EINVAL;
}
- if (param->rx_pending > (1 << mlx5_max_log_rq_size(rq_wq_type))) {
+ if (param->rx_pending > max_rq_size) {
netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
__func__, param->rx_pending,
- 1 << mlx5_max_log_rq_size(rq_wq_type));
+ max_rq_size);
return -EINVAL;
}
+
+ num_mtts = MLX5E_REQUIRED_MTTS(priv->params.num_channels,
+ rx_pending_wqes);
+ if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+ netdev_info(dev, "%s: rx_pending (%d) request can't be satisfied, try to reduce.\n",
+ __func__, param->rx_pending);
+ return -EINVAL;
+ }
+
if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
__func__, param->tx_pending,
@@ -410,9 +478,9 @@ static int mlx5e_set_ringparam(struct net_device *dev,
return -EINVAL;
}
- log_rq_size = order_base_2(param->rx_pending);
+ log_rq_size = order_base_2(rx_pending_wqes);
log_sq_size = order_base_2(param->tx_pending);
- min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, param->rx_pending);
+ min_rx_wqes = mlx5_min_rx_wqes(rq_wq_type, rx_pending_wqes);
if (log_rq_size == priv->params.log_rq_size &&
log_sq_size == priv->params.log_sq_size &&
@@ -454,6 +522,7 @@ static int mlx5e_set_channels(struct net_device *dev,
unsigned int count = ch->combined_count;
bool arfs_enabled;
bool was_opened;
+ u32 num_mtts;
int err = 0;
if (!count) {
@@ -472,6 +541,14 @@ static int mlx5e_set_channels(struct net_device *dev,
return -EINVAL;
}
+ num_mtts = MLX5E_REQUIRED_MTTS(count, BIT(priv->params.log_rq_size));
+ if (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
+ !MLX5E_VALID_NUM_MTTS(num_mtts)) {
+ netdev_info(dev, "%s: rx count (%d) request can't be satisfied, try to reduce.\n",
+ __func__, count);
+ return -EINVAL;
+ }
+
if (priv->params.num_channels == count)
return 0;
@@ -582,9 +659,10 @@ out:
static void ptys2ethtool_supported_link(unsigned long *supported_modes,
u32 eth_proto_cap)
{
+ unsigned long proto_cap = eth_proto_cap;
int proto;
- for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+ for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
bitmap_or(supported_modes, supported_modes,
ptys2ethtool_table[proto].supported,
__ETHTOOL_LINK_MODE_MASK_NBITS);
@@ -593,9 +671,10 @@ static void ptys2ethtool_supported_link(unsigned long *supported_modes,
static void ptys2ethtool_adver_link(unsigned long *advertising_modes,
u32 eth_proto_cap)
{
+ unsigned long proto_cap = eth_proto_cap;
int proto;
- for_each_set_bit(proto, (unsigned long *)&eth_proto_cap, MLX5E_LINK_MODES_NUMBER)
+ for_each_set_bit(proto, &proto_cap, MLX5E_LINK_MODES_NUMBER)
bitmap_or(advertising_modes, advertising_modes,
ptys2ethtool_table[proto].advertised,
__ETHTOOL_LINK_MODE_MASK_NBITS);
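mlx5e_rx_wqes_to_packets() above reports ring sizes to ethtool in packets for the striding RQ, rounding through order_base_2(). A standalone model of the rounding (packets_per_wqe is derived from stride sizing in the driver; here it is just a parameter):

    #include <stdio.h>

    /* ceil(log2(n)) for n >= 1, like the kernel's order_base_2() */
    static int order_base_2(unsigned int n)
    {
            int order = 0;

            while ((1u << order) < n)
                    order++;
            return order;
    }

    /* model of mlx5e_rx_wqes_to_packets() for the striding RQ */
    static unsigned int wqes_to_packets(unsigned int num_wqe,
                                        unsigned int packets_per_wqe)
    {
            return 1u << (order_base_2(num_wqe * packets_per_wqe) - 1);
    }

    int main(void)
    {
            /* e.g. 64 WQEs, 32 packets per WQE -> reported ring of 1024 */
            printf("packets=%u\n", wqes_to_packets(64, 32));
            return 0;
    }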
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 870bea3..2459c7f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -39,13 +39,6 @@
#include "eswitch.h"
#include "vxlan.h"
-enum {
- MLX5_EN_QP_FLUSH_TIMEOUT_MS = 5000,
- MLX5_EN_QP_FLUSH_MSLEEP_QUANT = 20,
- MLX5_EN_QP_FLUSH_MAX_ITER = MLX5_EN_QP_FLUSH_TIMEOUT_MS /
- MLX5_EN_QP_FLUSH_MSLEEP_QUANT,
-};
-
struct mlx5e_rq_param {
u32 rqc[MLX5_ST_SZ_DW(rqc)];
struct mlx5_wq_param wq;
@@ -162,6 +155,7 @@ static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
s->tx_queue_stopped += sq_stats->stopped;
s->tx_queue_wake += sq_stats->wake;
s->tx_queue_dropped += sq_stats->dropped;
+ s->tx_xmit_more += sq_stats->xmit_more;
s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
tx_offload_none += sq_stats->csum_none;
}
@@ -340,6 +334,9 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
+ rq->mpwqe_mtt_offset = c->ix *
+ MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));
+
rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
@@ -428,7 +425,6 @@ static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
- MLX5_SET(rqc, rqc, flush_in_error_en, 1);
MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
MLX5_ADAPTER_PAGE_SHIFT);
@@ -525,6 +521,27 @@ static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
return -ETIMEDOUT;
}
+static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+ struct mlx5e_rx_wqe *wqe;
+ __be16 wqe_ix_be;
+ u16 wqe_ix;
+
+ /* UMR WQE (if in progress) is always at wq->head */
+ if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+ mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+
+ while (!mlx5_wq_ll_is_empty(wq)) {
+ wqe_ix_be = *wq->tail_next;
+ wqe_ix = be16_to_cpu(wqe_ix_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
+ rq->dealloc_wqe(rq, wqe_ix);
+ mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
+ &wqe->next.next_wqe_index);
+ }
+}
+
static int mlx5e_open_rq(struct mlx5e_channel *c,
struct mlx5e_rq_param *param,
struct mlx5e_rq *rq)
@@ -548,8 +565,6 @@ static int mlx5e_open_rq(struct mlx5e_channel *c,
if (param->am_enabled)
set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);
- set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
-
sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
sq->ico_wqe_info[pi].num_wqebbs = 1;
mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
@@ -566,23 +581,8 @@ err_destroy_rq:
static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
- int tout = 0;
- int err;
-
- clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
-
- err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
- while (!mlx5_wq_ll_is_empty(&rq->wq) && !err &&
- tout++ < MLX5_EN_QP_FLUSH_MAX_ITER)
- msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
-
- if (err || tout == MLX5_EN_QP_FLUSH_MAX_ITER)
- set_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state);
-
- /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
- napi_synchronize(&rq->channel->napi);
-
cancel_work_sync(&rq->am.work);
mlx5e_disable_rq(rq);
@@ -821,7 +821,6 @@ static int mlx5e_open_sq(struct mlx5e_channel *c,
goto err_disable_sq;
if (sq->txq) {
- set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
netdev_tx_reset_queue(sq->txq);
netif_tx_start_queue(sq->txq);
}
@@ -845,38 +844,20 @@ static inline void netif_tx_disable_queue(struct netdev_queue *txq)
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
- int tout = 0;
- int err;
+ set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
+ /* prevent netif_tx_wake_queue */
+ napi_synchronize(&sq->channel->napi);
if (sq->txq) {
- clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
- /* prevent netif_tx_wake_queue */
- napi_synchronize(&sq->channel->napi);
netif_tx_disable_queue(sq->txq);
- /* ensure hw is notified of all pending wqes */
+ /* last doorbell out, godspeed .. */
if (mlx5e_sq_has_room_for(sq, 1))
mlx5e_send_nop(sq, true);
-
- err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
- MLX5_SQC_STATE_ERR, false, 0);
- if (err)
- set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
- }
-
- /* wait till sq is empty, unless a TX timeout occurred on this SQ */
- while (sq->cc != sq->pc &&
- !test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)) {
- msleep(MLX5_EN_QP_FLUSH_MSLEEP_QUANT);
- if (tout++ > MLX5_EN_QP_FLUSH_MAX_ITER)
- set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
}
- /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
- napi_synchronize(&sq->channel->napi);
-
- mlx5e_free_tx_descs(sq);
mlx5e_disable_sq(sq);
+ mlx5e_free_tx_descs(sq);
mlx5e_destroy_sq(sq);
}
@@ -1826,10 +1807,6 @@ int mlx5e_open_locked(struct net_device *netdev)
netif_set_real_num_tx_queues(netdev, num_txqs);
netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
- err = mlx5e_set_dev_port_mtu(netdev);
- if (err)
- goto err_clear_state_opened_flag;
-
err = mlx5e_open_channels(priv);
if (err) {
netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
@@ -2573,6 +2550,7 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
u16 max_mtu;
u16 min_mtu;
int err = 0;
+ bool reset;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
@@ -2588,13 +2566,18 @@ static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
mutex_lock(&priv->state_lock);
+ reset = !priv->params.lro_en &&
+ (priv->params.rq_wq_type !=
+ MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
+
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
- if (was_opened)
+ if (was_opened && reset)
mlx5e_close_locked(netdev);
netdev->mtu = new_mtu;
+ mlx5e_set_dev_port_mtu(netdev);
- if (was_opened)
+ if (was_opened && reset)
err = mlx5e_open_locked(netdev);
mutex_unlock(&priv->state_lock);
@@ -2794,7 +2777,7 @@ static void mlx5e_tx_timeout(struct net_device *dev)
if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
continue;
sched_work = true;
- set_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state);
+ set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
}
@@ -3231,8 +3214,8 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
struct mlx5_create_mkey_mbox_in *in;
struct mlx5_mkey_seg *mkc;
int inlen = sizeof(*in);
- u64 npages =
- priv->profile->max_nch(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
+ u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
+ BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
int err;
in = mlx5_vzalloc(inlen);
@@ -3246,10 +3229,12 @@ static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
MLX5_PERM_LOCAL_WRITE |
MLX5_ACCESS_MODE_MTT;
+ npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);
+
mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
- mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
+ mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages));
mkc->log2_page_size = PAGE_SHIFT;
err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
@@ -3385,6 +3370,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
queue_work(priv->wq, &priv->set_rx_mode_work);
if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
+ mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
rep.load = mlx5e_nic_rep_load;
rep.unload = mlx5e_nic_rep_unload;
rep.vport = 0;
@@ -3463,6 +3449,8 @@ void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
mlx5e_init_l2_addr(priv);
+ mlx5e_set_dev_port_mtu(netdev);
+
err = register_netdev(netdev);
if (err) {
mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
@@ -3501,16 +3489,20 @@ static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
struct mlx5_eswitch *esw = mdev->priv.eswitch;
int total_vfs = MLX5_TOTAL_VPORTS(mdev);
int vport;
+ u8 mac[ETH_ALEN];
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
return;
+ mlx5_query_nic_vport_mac_address(mdev, 0, mac);
+
for (vport = 1; vport < total_vfs; vport++) {
struct mlx5_eswitch_rep rep;
rep.load = mlx5e_vport_rep_load;
rep.unload = mlx5e_vport_rep_unload;
rep.vport = vport;
+ ether_addr_copy(rep.hw_id, mac);
mlx5_eswitch_register_vport_rep(esw, &rep);
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 1c7d8b8..134de4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -135,17 +135,16 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = {
int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr)
{
struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_eswitch_rep *rep = priv->ppriv;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
- u8 mac[ETH_ALEN];
if (esw->mode == SRIOV_NONE)
return -EOPNOTSUPP;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
- mlx5_query_nic_vport_mac_address(priv->mdev, 0, mac);
attr->u.ppid.id_len = ETH_ALEN;
- memcpy(&attr->u.ppid.id, &mac, ETH_ALEN);
+ ether_addr_copy(attr->u.ppid.id, rep->hw_id);
break;
default:
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 9f2a16a..e7c969d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -324,9 +324,9 @@ mlx5e_copy_skb_header_fragmented_mpwqe(struct device *pdev,
}
}
-static u16 mlx5e_get_wqe_mtt_offset(u16 rq_ix, u16 wqe_ix)
+static u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
{
- return rq_ix * MLX5_CHANNEL_MAX_NUM_MTTS +
+ return rq->mpwqe_mtt_offset +
wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}
@@ -340,7 +340,7 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
struct mlx5_wqe_data_seg *dseg = &wqe->data;
struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
- u16 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix);
+ u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);
memset(wqe, 0, sizeof(*wqe));
cseg->opmod_idx_opcode =
@@ -353,9 +353,9 @@ static void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
ucseg->klm_octowords =
- cpu_to_be16(mlx5e_get_mtt_octw(MLX5_MPWRQ_PAGES_PER_WQE));
+ cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
ucseg->bsf_octowords =
- cpu_to_be16(mlx5e_get_mtt_octw(umr_wqe_mtt_offset));
+ cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
ucseg->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
dseg->lkey = sq->mkey_be;
@@ -423,7 +423,7 @@ static int mlx5e_alloc_rx_fragmented_mpwqe(struct mlx5e_rq *rq,
{
struct mlx5e_mpw_info *wi = &rq->wqe_info[ix];
int mtt_sz = mlx5e_get_wqe_mtt_sz();
- u32 dma_offset = mlx5e_get_wqe_mtt_offset(rq->ix, ix) << PAGE_SHIFT;
+ u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, ix) << PAGE_SHIFT;
int i;
wi->umr.dma_info = kmalloc(sizeof(*wi->umr.dma_info) *
@@ -506,6 +506,12 @@ void mlx5e_post_rx_fragmented_mpwqe(struct mlx5e_rq *rq)
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
clear_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state);
+
+ if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state))) {
+ mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);
+ return;
+ }
+
mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
rq->stats.mpwqe_frag++;
@@ -595,26 +601,9 @@ void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
wi->free_wqe(rq, wi);
}
-void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
-{
- struct mlx5_wq_ll *wq = &rq->wq;
- struct mlx5e_rx_wqe *wqe;
- __be16 wqe_ix_be;
- u16 wqe_ix;
-
- while (!mlx5_wq_ll_is_empty(wq)) {
- wqe_ix_be = *wq->tail_next;
- wqe_ix = be16_to_cpu(wqe_ix_be);
- wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
- rq->dealloc_wqe(rq, wqe_ix);
- mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
- &wqe->next.next_wqe_index);
- }
-}
-
#define RQ_CANNOT_POST(rq) \
- (!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state) || \
- test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
+ (test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state) || \
+ test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
{
@@ -648,24 +637,32 @@ bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe,
u32 cqe_bcnt)
{
- struct ethhdr *eth = (struct ethhdr *)(skb->data);
- struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
- struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct iphdr *ipv4;
+ struct ipv6hdr *ipv6;
struct tcphdr *tcp;
+ int network_depth = 0;
+ __be16 proto;
+ u16 tot_len;
u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
(CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
- u16 tot_len = cqe_bcnt - ETH_HLEN;
+ skb->mac_len = ETH_HLEN;
+ proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
+
+ ipv4 = (struct iphdr *)(skb->data + network_depth);
+ ipv6 = (struct ipv6hdr *)(skb->data + network_depth);
+ tot_len = cqe_bcnt - network_depth;
- if (eth->h_proto == htons(ETH_P_IP)) {
- tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ if (proto == htons(ETH_P_IP)) {
+ tcp = (struct tcphdr *)(skb->data + network_depth +
sizeof(struct iphdr));
ipv6 = NULL;
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
} else {
- tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ tcp = (struct tcphdr *)(skb->data + network_depth +
sizeof(struct ipv6hdr));
ipv4 = NULL;
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
@@ -916,7 +913,7 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
int work_done = 0;
- if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH_TIMEOUT, &rq->state)))
+ if (unlikely(test_bit(MLX5E_RQ_STATE_FLUSH, &rq->state)))
return 0;
if (cq->decmprs_left)
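
The en_rx.c hunks above stop hard-coding ETH_HLEN in the LRO header rewrite: __vlan_get_protocol() resolves the real network-header offset, so VLAN-tagged frames get correct ipv4/ipv6/tcp pointers and a correct tot_len. Below is a minimal userspace sketch of that offset walk, assuming plain 802.1Q/802.1ad tagging; it is illustrative, not the kernel helper, and the _X constants are local stand-ins for the uapi values.

	#include <stdint.h>
	#include <stddef.h>
	#include <string.h>
	#include <arpa/inet.h>

	#define ETH_HLEN_X     14
	#define VLAN_HLEN_X    4
	#define ETH_P_8021Q_X  0x8100
	#define ETH_P_8021AD_X 0x88A8

	/* Return the ethertype of the network-layer payload and store its
	 * offset from the start of the frame in *depth; 0 means truncated. */
	static uint16_t frame_network_proto(const uint8_t *frame, size_t len,
					    int *depth)
	{
		size_t off = ETH_HLEN_X;
		uint16_t raw, proto;

		if (len < off)
			return 0;
		memcpy(&raw, frame + 12, 2);	/* ethertype field of the MAC header */
		proto = ntohs(raw);

		/* Skip any stack of 802.1Q/802.1ad tags, 4 bytes each. */
		while (proto == ETH_P_8021Q_X || proto == ETH_P_8021AD_X) {
			if (len < off + VLAN_HLEN_X)
				return 0;
			memcpy(&raw, frame + off + 2, 2);
			proto = ntohs(raw);
			off += VLAN_HLEN_X;
		}

		*depth = (int)off;
		return proto;
	}

	int main(void)
	{
		/* 802.1Q-tagged IPv4 frame header: TPID 0x8100, inner 0x0800 */
		uint8_t hdr[18] = { [12] = 0x81, [13] = 0x00,
				    [16] = 0x08, [17] = 0x00 };
		int depth = 0;
		uint16_t proto = frame_network_proto(hdr, sizeof(hdr), &depth);

		return !(proto == 0x0800 && depth == 18);
	}
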
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index 7b9d8a9..499487c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -70,6 +70,7 @@ struct mlx5e_sw_stats {
u64 tx_queue_stopped;
u64 tx_queue_wake;
u64 tx_queue_dropped;
+ u64 tx_xmit_more;
u64 rx_wqe_err;
u64 rx_mpwqe_filler;
u64 rx_mpwqe_frag;
@@ -101,6 +102,7 @@ static const struct counter_desc sw_stats_desc[] = {
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_stopped) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_wake) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_queue_dropped) },
+ { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, tx_xmit_more) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_wqe_err) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_filler) },
{ MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_mpwqe_frag) },
@@ -298,6 +300,7 @@ struct mlx5e_sq_stats {
/* commonly accessed in data path */
u64 packets;
u64 bytes;
+ u64 xmit_more;
u64 tso_packets;
u64 tso_bytes;
u64 tso_inner_packets;
@@ -324,6 +327,7 @@ static const struct counter_desc sq_stats_desc[] = {
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, stopped) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, wake) },
{ MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, dropped) },
+ { MLX5E_DECLARE_TX_STAT(struct mlx5e_sq_stats, xmit_more) },
};
#define NUM_SW_COUNTERS ARRAY_SIZE(sw_stats_desc)
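
The two stats hunks follow the driver's descriptor-table pattern: a new counter is one u64 field plus one table row pairing a name with the field's offset, and the ethtool code then walks the table generically. A compilable sketch of that pattern, with names invented for the example:

	#include <stdio.h>
	#include <stddef.h>
	#include <stdint.h>

	struct sw_stats {
		uint64_t tx_queue_dropped;
		uint64_t tx_xmit_more;		/* the newly added counter */
	};

	struct counter_desc {
		const char *name;
		size_t offset;
	};

	#define DECLARE_STAT(type, fld) { #fld, offsetof(type, fld) }

	static const struct counter_desc sw_stats_desc[] = {
		DECLARE_STAT(struct sw_stats, tx_queue_dropped),
		DECLARE_STAT(struct sw_stats, tx_xmit_more),
	};

	int main(void)
	{
		struct sw_stats s = { .tx_xmit_more = 5 };
		size_t i;

		for (i = 0; i < sizeof(sw_stats_desc) / sizeof(sw_stats_desc[0]); i++)
			printf("%s = %llu\n", sw_stats_desc[i].name,
			       (unsigned long long)*(const uint64_t *)
					((const char *)&s + sw_stats_desc[i].offset));
		return 0;
	}
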
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 0f19b01..22cfc4a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -170,7 +170,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
struct flow_dissector_key_control *key =
skb_flow_dissector_target(f->dissector,
- FLOW_DISSECTOR_KEY_BASIC,
+ FLOW_DISSECTOR_KEY_CONTROL,
f->key);
addr_type = key->addr_type;
}
@@ -318,6 +318,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
u32 *action, u32 *flow_tag)
{
const struct tc_action *a;
+ LIST_HEAD(actions);
if (tc_no_actions(exts))
return -EINVAL;
@@ -325,7 +326,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
*flow_tag = MLX5_FS_DEFAULT_FLOW_TAG;
*action = 0;
- tc_for_each_action(a, exts) {
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
/* Only support a single action per rule */
if (*action)
return -EINVAL;
@@ -362,13 +364,15 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
u32 *action, u32 *dest_vport)
{
const struct tc_action *a;
+ LIST_HEAD(actions);
if (tc_no_actions(exts))
return -EINVAL;
*action = 0;
- tc_for_each_action(a, exts) {
+ tcf_exts_to_list(exts, &actions);
+ list_for_each_entry(a, &actions, list) {
/* Only support a single action per rule */
if (*action)
return -EINVAL;
@@ -503,6 +507,7 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow;
struct tc_action *a;
struct mlx5_fc *counter;
+ LIST_HEAD(actions);
u64 bytes;
u64 packets;
u64 lastuse;
@@ -518,7 +523,8 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv,
mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse);
- tc_for_each_action(a, f->exts)
+ tcf_exts_to_list(f->exts, &actions);
+ list_for_each_entry(a, &actions, list)
tcf_action_stats_update(a, bytes, packets, lastuse);
return 0;
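
All three en_tc.c call sites switch from the tc_for_each_action() macro to materializing the actions on a caller-owned list head with tcf_exts_to_list() and walking it with list_for_each_entry(). The sketch below reproduces just enough of the kernel's list machinery to run in userspace; the action struct is a stand-in.

	#include <stdio.h>
	#include <stddef.h>

	struct list_head { struct list_head *next, *prev; };

	#define LIST_HEAD_INIT(name) { &(name), &(name) }
	#define LIST_HEAD(name) struct list_head name = LIST_HEAD_INIT(name)

	static void list_add_tail(struct list_head *item, struct list_head *head)
	{
		item->prev = head->prev;
		item->next = head;
		head->prev->next = item;
		head->prev = item;
	}

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	#define list_for_each_entry(pos, head, member)				\
		for (pos = container_of((head)->next, typeof(*pos), member);	\
		     &pos->member != (head);					\
		     pos = container_of(pos->member.next, typeof(*pos), member))

	struct action { int id; struct list_head list; };

	int main(void)
	{
		LIST_HEAD(actions);
		struct action a1 = { .id = 1 }, a2 = { .id = 2 };
		struct action *a;

		/* stands in for tcf_exts_to_list(exts, &actions) */
		list_add_tail(&a1.list, &actions);
		list_add_tail(&a2.list, &actions);

		list_for_each_entry(a, &actions, list)
			printf("action %d\n", a->id);
		return 0;
	}
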
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index e073bf59..eb0e725 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -356,6 +356,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
sq->stats.stopped++;
}
+ sq->stats.xmit_more += skb->xmit_more;
if (!skb->xmit_more || netif_xmit_stopped(sq->txq)) {
int bf_sz = 0;
@@ -394,35 +395,6 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
return mlx5e_sq_xmit(sq, skb);
}
-void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
-{
- struct mlx5e_tx_wqe_info *wi;
- struct sk_buff *skb;
- u16 ci;
- int i;
-
- while (sq->cc != sq->pc) {
- ci = sq->cc & sq->wq.sz_m1;
- skb = sq->skb[ci];
- wi = &sq->wqe_info[ci];
-
- if (!skb) { /* nop */
- sq->cc++;
- continue;
- }
-
- for (i = 0; i < wi->num_dma; i++) {
- struct mlx5e_sq_dma *dma =
- mlx5e_dma_get(sq, sq->dma_fifo_cc++);
-
- mlx5e_tx_dma_unmap(sq->pdev, dma);
- }
-
- dev_kfree_skb_any(skb);
- sq->cc += wi->num_wqebbs;
- }
-}
-
bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_sq *sq;
@@ -434,7 +406,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
sq = container_of(cq, struct mlx5e_sq, cq);
- if (unlikely(test_bit(MLX5E_SQ_STATE_TX_TIMEOUT, &sq->state)))
+ if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
return false;
npkts = 0;
@@ -512,11 +484,39 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget)
netdev_tx_completed_queue(sq->txq, npkts, nbytes);
if (netif_tx_queue_stopped(sq->txq) &&
- mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM) &&
- likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
- netif_tx_wake_queue(sq->txq);
- sq->stats.wake++;
+ mlx5e_sq_has_room_for(sq, MLX5E_SQ_STOP_ROOM)) {
+ netif_tx_wake_queue(sq->txq);
+ sq->stats.wake++;
}
return (i == MLX5E_TX_CQ_POLL_BUDGET);
}
+
+void mlx5e_free_tx_descs(struct mlx5e_sq *sq)
+{
+ struct mlx5e_tx_wqe_info *wi;
+ struct sk_buff *skb;
+ u16 ci;
+ int i;
+
+ while (sq->cc != sq->pc) {
+ ci = sq->cc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+ wi = &sq->wqe_info[ci];
+
+ if (!skb) { /* nop */
+ sq->cc++;
+ continue;
+ }
+
+ for (i = 0; i < wi->num_dma; i++) {
+ struct mlx5e_sq_dma *dma =
+ mlx5e_dma_get(sq, sq->dma_fifo_cc++);
+
+ mlx5e_tx_dma_unmap(sq->pdev, dma);
+ }
+
+ dev_kfree_skb_any(skb);
+ sq->cc += wi->num_wqebbs;
+ }
+}
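
mlx5e_free_tx_descs(), moved below the CQ poller here, drains the send queue by walking the consumer counter up to the producer counter, skipping NOP slots and advancing by each entry's WQEBB count. A userspace rendition of that loop, with invented slot contents:

	#include <stdio.h>

	#define RING_SIZE 8			/* power of two, like the SQ */
	#define RING_MASK (RING_SIZE - 1)

	struct slot {
		const char *payload;		/* NULL plays the role of a NOP */
		unsigned int nslots;		/* like wi->num_wqebbs */
	};

	static void drain(struct slot *ring, unsigned int *cc, unsigned int pc)
	{
		while (*cc != pc) {
			struct slot *s = &ring[*cc & RING_MASK];

			if (!s->payload) {	/* nop entry consumes one slot */
				(*cc)++;
				continue;
			}
			printf("freeing %s\n", s->payload);	/* unmap + free here */
			*cc += s->nslots;	/* multi-slot entries advance by their size */
		}
	}

	int main(void)
	{
		struct slot ring[RING_SIZE] = {
			[0] = { "skb A", 2 },
			[2] = { NULL,    1 },
			[3] = { "skb B", 1 },
		};
		unsigned int cc = 0;

		drain(ring, &cc, 4);
		return 0;
	}
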
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
index 64ae2e8..9bf33bb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -51,16 +51,18 @@ struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
static void mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
{
+ struct mlx5e_sq *sq = container_of(cq, struct mlx5e_sq, cq);
struct mlx5_wq_cyc *wq;
struct mlx5_cqe64 *cqe;
- struct mlx5e_sq *sq;
u16 sqcc;
+ if (unlikely(test_bit(MLX5E_SQ_STATE_FLUSH, &sq->state)))
+ return;
+
cqe = mlx5e_get_cqe(cq);
if (likely(!cqe))
return;
- sq = container_of(cq, struct mlx5e_sq, cq);
wq = &sq->wq;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f6d6677..b247949 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1451,7 +1451,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
esw_debug(esw->dev, "Enabling VPORT(%d)\n", vport_num);
- if (vport_num) { /* Only VFs need ACLs for VST and spoofchk filtering */
+ /* Only VFs need ACLs for VST and spoofchk filtering */
+ if (vport_num && esw->mode == SRIOV_LEGACY) {
esw_vport_ingress_config(esw, vport);
esw_vport_egress_config(esw, vport);
}
@@ -1502,7 +1503,7 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
*/
esw_vport_change_handle_locked(vport);
vport->enabled_events = 0;
- if (vport_num) {
+ if (vport_num && esw->mode == SRIOV_LEGACY) {
esw_vport_disable_egress_acl(esw, vport);
esw_vport_disable_ingress_acl(esw, vport);
}
@@ -1553,6 +1554,7 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
abort:
esw_enable_vport(esw, 0, UC_ADDR_CHANGE);
+ esw->mode = SRIOV_NONE;
return err;
}
@@ -1767,7 +1769,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
vport, err);
mutex_lock(&esw->state_lock);
- if (evport->enabled)
+ if (evport->enabled && esw->mode == SRIOV_LEGACY)
err = esw_vport_ingress_config(esw, evport);
mutex_unlock(&esw->state_lock);
return err;
@@ -1839,7 +1841,7 @@ int mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
evport->vlan = vlan;
evport->qos = qos;
- if (evport->enabled) {
+ if (evport->enabled && esw->mode == SRIOV_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
if (err)
goto out;
@@ -1868,10 +1870,11 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
mutex_lock(&esw->state_lock);
pschk = evport->spoofchk;
evport->spoofchk = spoofchk;
- if (evport->enabled)
+ if (evport->enabled && esw->mode == SRIOV_LEGACY) {
err = esw_vport_ingress_config(esw, evport);
- if (err)
- evport->spoofchk = pschk;
+ if (err)
+ evport->spoofchk = pschk;
+ }
mutex_unlock(&esw->state_lock);
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index c0b0560..a961409 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -174,6 +174,7 @@ struct mlx5_eswitch_rep {
void *priv_data;
struct list_head vport_sqs_list;
bool valid;
+ u8 hw_id[ETH_ALEN];
};
struct mlx5_esw_offload {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index a357e8e..7de40e6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -113,7 +113,7 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *esw, int vport, u32 sqn
dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT;
dest.vport_num = vport;
- flow_rule = mlx5_add_flow_rule(esw->fdb_table.fdb, spec,
+ flow_rule = mlx5_add_flow_rule(esw->fdb_table.offloads.fdb, spec,
MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
0, &dest);
if (IS_ERR(flow_rule))
@@ -446,7 +446,7 @@ out:
static int esw_offloads_start(struct mlx5_eswitch *esw)
{
- int err, num_vfs = esw->dev->priv.sriov.num_vfs;
+ int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
if (esw->mode != SRIOV_LEGACY) {
esw_warn(esw->dev, "Can't set offloads mode, SRIOV legacy not enabled\n");
@@ -455,8 +455,12 @@ static int esw_offloads_start(struct mlx5_eswitch *esw)
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
- if (err)
- esw_warn(esw->dev, "Failed set eswitch to offloads, err %d\n", err);
+ if (err) {
+ esw_warn(esw->dev, "Failed setting eswitch to offloads, err %d\n", err);
+ err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
+ if (err1)
+ esw_warn(esw->dev, "Failed setting eswitch back to legacy, err %d\n", err1);
+ }
return err;
}
@@ -508,12 +512,16 @@ create_ft_err:
static int esw_offloads_stop(struct mlx5_eswitch *esw)
{
- int err, num_vfs = esw->dev->priv.sriov.num_vfs;
+ int err, err1, num_vfs = esw->dev->priv.sriov.num_vfs;
mlx5_eswitch_disable_sriov(esw);
err = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_LEGACY);
- if (err)
- esw_warn(esw->dev, "Failed set eswitch legacy mode. err %d\n", err);
+ if (err) {
+ esw_warn(esw->dev, "Failed setting eswitch to legacy, err %d\n", err);
+ err1 = mlx5_eswitch_enable_sriov(esw, num_vfs, SRIOV_OFFLOADS);
+ if (err1)
+ esw_warn(esw->dev, "Failed setting eswitch back to offloads, err %d\n", err1);
+ }
return err;
}
@@ -535,7 +543,7 @@ void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports)
esw_destroy_offloads_fdb_table(esw);
}
-static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
+static int esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
{
switch (mode) {
case DEVLINK_ESWITCH_MODE_LEGACY:
@@ -551,6 +559,22 @@ static int mlx5_esw_mode_from_devlink(u16 mode, u16 *mlx5_mode)
return 0;
}
+static int esw_mode_to_devlink(u16 mlx5_mode, u16 *mode)
+{
+ switch (mlx5_mode) {
+ case SRIOV_LEGACY:
+ *mode = DEVLINK_ESWITCH_MODE_LEGACY;
+ break;
+ case SRIOV_OFFLOADS:
+ *mode = DEVLINK_ESWITCH_MODE_SWITCHDEV;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
{
struct mlx5_core_dev *dev;
@@ -566,7 +590,7 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode)
if (cur_mlx5_mode == SRIOV_NONE)
return -EOPNOTSUPP;
- if (mlx5_esw_mode_from_devlink(mode, &mlx5_mode))
+ if (esw_mode_from_devlink(mode, &mlx5_mode))
return -EINVAL;
if (cur_mlx5_mode == mlx5_mode)
@@ -592,9 +616,7 @@ int mlx5_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode)
if (dev->priv.eswitch->mode == SRIOV_NONE)
return -EOPNOTSUPP;
- *mode = dev->priv.eswitch->mode;
-
- return 0;
+ return esw_mode_to_devlink(dev->priv.eswitch->mode, mode);
}
void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw,
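
With esw_mode_to_devlink() added, both directions of the mode translation are explicit, and mode_get no longer leaks the driver's internal SRIOV_* values through the devlink API. A self-contained sketch of the symmetric pair, with local stand-in enum values:

	#include <errno.h>
	#include <stdint.h>

	enum { MODE_LEGACY, MODE_SWITCHDEV };	/* devlink-facing values */
	enum { DRV_LEGACY, DRV_OFFLOADS };	/* driver-internal values */

	static int mode_from_devlink(uint16_t mode, uint16_t *drv_mode)
	{
		switch (mode) {
		case MODE_LEGACY:    *drv_mode = DRV_LEGACY;   return 0;
		case MODE_SWITCHDEV: *drv_mode = DRV_OFFLOADS; return 0;
		default:             return -EINVAL;
		}
	}

	static int mode_to_devlink(uint16_t drv_mode, uint16_t *mode)
	{
		switch (drv_mode) {
		case DRV_LEGACY:   *mode = MODE_LEGACY;    return 0;
		case DRV_OFFLOADS: *mode = MODE_SWITCHDEV; return 0;
		default:           return -EINVAL;
		}
	}

	int main(void)
	{
		uint16_t drv, back;

		if (mode_from_devlink(MODE_SWITCHDEV, &drv) ||
		    mode_to_devlink(drv, &back))
			return 1;
		return back != MODE_SWITCHDEV;	/* round-trips cleanly */
	}
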
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index 9134010..287ade1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -425,11 +425,11 @@ struct mlx5_cmd_fc_bulk *
mlx5_cmd_fc_bulk_alloc(struct mlx5_core_dev *dev, u16 id, int num)
{
struct mlx5_cmd_fc_bulk *b;
- int outlen = sizeof(*b) +
+ int outlen =
MLX5_ST_SZ_BYTES(query_flow_counter_out) +
MLX5_ST_SZ_BYTES(traffic_counter) * num;
- b = kzalloc(outlen, GFP_KERNEL);
+ b = kzalloc(sizeof(*b) + outlen, GFP_KERNEL);
if (!b)
return NULL;
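
The fs_cmd.c fix separates the two lengths that were conflated: outlen must describe only the firmware reply area, while the allocation also needs room for the bookkeeping struct in front of it. A sketch of the flexible-array version of that layout, with invented field names:

	#include <stdlib.h>

	struct fc_bulk {
		int num;
		int out_len;		/* length of out[] only, not the header */
		unsigned char out[];	/* flexible array: the firmware reply area */
	};

	static struct fc_bulk *fc_bulk_alloc(int per_counter_sz, int num)
	{
		int out_len = per_counter_sz * num;
		struct fc_bulk *b = calloc(1, sizeof(*b) + out_len);

		if (!b)
			return NULL;
		b->num = num;
		b->out_len = out_len;	/* handed to the device as the reply size */
		return b;
	}

	int main(void)
	{
		struct fc_bulk *b = fc_bulk_alloc(16, 4);
		int ok = b != NULL;

		free(b);
		return !ok;
	}
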
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 75bb8c8..3d6c1f6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -80,7 +80,7 @@
LEFTOVERS_NUM_PRIOS)
#define ETHTOOL_PRIO_NUM_LEVELS 1
-#define ETHTOOL_NUM_PRIOS 10
+#define ETHTOOL_NUM_PRIOS 11
#define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS)
/* Vlan, mac, ttc, aRFS */
#define KERNEL_NIC_PRIO_NUM_LEVELS 4
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
index c2877e9..3a9195b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c
@@ -126,12 +126,21 @@ static struct rb_node *mlx5_fc_stats_query(struct mlx5_core_dev *dev,
for (node = &first->node; node; node = rb_next(node)) {
struct mlx5_fc *counter = rb_entry(node, struct mlx5_fc, node);
struct mlx5_fc_cache *c = &counter->cache;
+ u64 packets;
+ u64 bytes;
if (counter->id > last_id)
break;
mlx5_cmd_fc_bulk_get(dev, b,
- counter->id, &c->packets, &c->bytes);
+ counter->id, &packets, &bytes);
+
+ if (c->packets == packets)
+ continue;
+
+ c->packets = packets;
+ c->bytes = bytes;
+ c->lastuse = jiffies;
}
out:
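
The fs_counters.c hunk reads the bulk-queried values into locals first and commits them, together with a fresh lastuse stamp, only when the packet count actually moved; idle flows therefore keep their old timestamp, which is what mlx5e_stats_flower() reports upward. A userspace sketch of that update rule, with time() standing in for jiffies:

	#include <stdint.h>
	#include <time.h>

	struct fc_cache {
		uint64_t packets;
		uint64_t bytes;
		time_t lastuse;
	};

	static void fc_cache_update(struct fc_cache *c,
				    uint64_t packets, uint64_t bytes)
	{
		if (c->packets == packets)	/* no new traffic: keep old lastuse */
			return;

		c->packets = packets;
		c->bytes = bytes;
		c->lastuse = time(NULL);	/* jiffies in the kernel version */
	}

	int main(void)
	{
		struct fc_cache c = { 0 };

		fc_cache_update(&c, 10, 1500);	/* traffic moved: lastuse stamped */
		fc_cache_update(&c, 10, 1500);	/* idle: lastuse kept */
		return 0;
	}
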
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 4f491d4..2385bae 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -1420,36 +1420,12 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
dev_info(&pdev->dev, "%s was called\n", __func__);
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv);
+ pci_save_state(pdev);
mlx5_pci_disable_device(dev);
return state == pci_channel_io_perm_failure ?
PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}
-static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
-{
- struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- int err = 0;
-
- dev_info(&pdev->dev, "%s was called\n", __func__);
-
- err = mlx5_pci_enable_device(dev);
- if (err) {
- dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n"
- , __func__, err);
- return PCI_ERS_RESULT_DISCONNECT;
- }
- pci_set_master(pdev);
- pci_set_power_state(pdev, PCI_D0);
- pci_restore_state(pdev);
-
- return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
-}
-
-void mlx5_disable_device(struct mlx5_core_dev *dev)
-{
- mlx5_pci_err_detected(dev->pdev, 0);
-}
-
/* wait for the device to show vital signs by waiting
* for the health counter to start counting.
*/
@@ -1477,21 +1453,44 @@ static int wait_vital(struct pci_dev *pdev)
return -ETIMEDOUT;
}
-static void mlx5_pci_resume(struct pci_dev *pdev)
+static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
- struct mlx5_priv *priv = &dev->priv;
int err;
dev_info(&pdev->dev, "%s was called\n", __func__);
- pci_save_state(pdev);
- err = wait_vital(pdev);
+ err = mlx5_pci_enable_device(dev);
if (err) {
+ dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
+ __func__, err);
+ return PCI_ERS_RESULT_DISCONNECT;
+ }
+
+ pci_set_master(pdev);
+ pci_restore_state(pdev);
+
+ if (wait_vital(pdev)) {
dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
- return;
+ return PCI_ERS_RESULT_DISCONNECT;
}
+ return PCI_ERS_RESULT_RECOVERED;
+}
+
+void mlx5_disable_device(struct mlx5_core_dev *dev)
+{
+ mlx5_pci_err_detected(dev->pdev, 0);
+}
+
+static void mlx5_pci_resume(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_priv *priv = &dev->priv;
+ int err;
+
+ dev_info(&pdev->dev, "%s was called\n", __func__);
+
err = mlx5_load_one(dev, priv);
if (err)
dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n"
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index f33b997..af371a8 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -56,6 +56,7 @@
#define MLXSW_PORT_PHY_BITS_MASK (MLXSW_PORT_MAX_PHY_PORTS - 1)
#define MLXSW_PORT_CPU_PORT 0x0
+#define MLXSW_PORT_ROUTER_PORT (MLXSW_PORT_MAX_PHY_PORTS + 2)
#define MLXSW_PORT_DONT_CARE (MLXSW_PORT_MAX_PORTS)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 7ca9201..1721098 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -3383,6 +3383,15 @@ MLXSW_ITEM32(reg, ritr, ipv4_fe, 0x04, 29, 1);
*/
MLXSW_ITEM32(reg, ritr, ipv6_fe, 0x04, 28, 1);
+/* reg_ritr_lb_en
+ * Loop-back filter enable for unicast packets.
+ * If the flag is set then loop-back filter for unicast packets is
+ * implemented on the RIF. Multicast packets are always subject to
+ * loop-back filtering.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, ritr, lb_en, 0x04, 24, 1);
+
/* reg_ritr_virtual_router
* Virtual router ID associated with the router interface.
* Access: RW
@@ -3484,6 +3493,7 @@ static inline void mlxsw_reg_ritr_pack(char *payload, bool enable,
mlxsw_reg_ritr_op_set(payload, op);
mlxsw_reg_ritr_rif_set(payload, rif);
mlxsw_reg_ritr_ipv4_fe_set(payload, 1);
+ mlxsw_reg_ritr_lb_en_set(payload, 1);
mlxsw_reg_ritr_mtu_set(payload, mtu);
mlxsw_reg_ritr_if_mac_memcpy_to(payload, mac);
}
@@ -4000,6 +4010,7 @@ static inline void mlxsw_reg_ralue_pack(char *payload,
{
MLXSW_REG_ZERO(ralue, payload);
mlxsw_reg_ralue_protocol_set(payload, protocol);
+ mlxsw_reg_ralue_op_set(payload, op);
mlxsw_reg_ralue_virtual_router_set(payload, virtual_router);
mlxsw_reg_ralue_prefix_len_set(payload, prefix_len);
mlxsw_reg_ralue_entry_type_set(payload,
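
Both reg.h hunks lean on the MLXSW_ITEM32() convention: a register is a raw big-endian payload and each field is declared as a (byte offset, low bit, width) triple with generated accessors, so adding lb_en or wiring up ralue_op_set() is one line each. A runnable sketch of such a setter; the offsets are illustrative:

	#include <stdint.h>
	#include <string.h>
	#include <stdio.h>
	#include <arpa/inet.h>

	static void item32_set(char *payload, int offset, int shift, int width,
			       uint32_t val)
	{
		uint32_t mask = (width == 32) ? ~0u : ((1u << width) - 1) << shift;
		uint32_t word;

		memcpy(&word, payload + offset, 4);	/* payload is big-endian */
		word = ntohl(word);
		word = (word & ~mask) | ((val << shift) & mask);
		word = htonl(word);
		memcpy(payload + offset, &word, 4);
	}

	int main(void)
	{
		char payload[0x40] = { 0 };

		/* ritr.lb_en lives at offset 0x04, bit 24, width 1 */
		item32_set(payload, 0x04, 24, 1, 1);
		printf("0x%02x\n", (unsigned char)payload[0x04]);	/* 0x01 */
		return 0;
	}
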
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index c3e6150..d48873b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -56,6 +56,7 @@
#include <generated/utsrelease.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
+#include <net/netevent.h>
#include "spectrum.h"
#include "core.h"
@@ -942,8 +943,8 @@ static void mlxsw_sp_port_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_vport)
kfree(mlxsw_sp_vport);
}
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
- u16 vid)
+static int mlxsw_sp_port_add_vid(struct net_device *dev,
+ __be16 __always_unused proto, u16 vid)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port *mlxsw_sp_vport;
@@ -956,16 +957,12 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
if (!vid)
return 0;
- if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid)) {
- netdev_warn(dev, "VID=%d already configured\n", vid);
+ if (mlxsw_sp_port_vport_find(mlxsw_sp_port, vid))
return 0;
- }
mlxsw_sp_vport = mlxsw_sp_port_vport_create(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport) {
- netdev_err(dev, "Failed to create vPort for VID=%d\n", vid);
+ if (!mlxsw_sp_vport)
return -ENOMEM;
- }
/* When adding the first VLAN interface on a bridged port we need to
* transition all the active 802.1Q bridge VLANs to use explicit
@@ -973,24 +970,17 @@ int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
*/
if (list_is_singular(&mlxsw_sp_port->vports_list)) {
err = mlxsw_sp_port_vp_mode_trans(mlxsw_sp_port);
- if (err) {
- netdev_err(dev, "Failed to set to Virtual mode\n");
+ if (err)
goto err_port_vp_mode_trans;
- }
}
err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, false);
- if (err) {
- netdev_err(dev, "Failed to disable learning for VID=%d\n", vid);
+ if (err)
goto err_port_vid_learning_set;
- }
err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, true, untagged);
- if (err) {
- netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
- vid);
+ if (err)
goto err_port_add_vid;
- }
return 0;
@@ -1010,7 +1000,6 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
struct mlxsw_sp_port *mlxsw_sp_vport;
struct mlxsw_sp_fid *f;
- int err;
/* VLAN 0 is removed from HW filter when device goes down, but
* it is reserved in our case, so simply return.
@@ -1019,23 +1008,12 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
return 0;
mlxsw_sp_vport = mlxsw_sp_port_vport_find(mlxsw_sp_port, vid);
- if (!mlxsw_sp_vport) {
- netdev_warn(dev, "VID=%d does not exist\n", vid);
+ if (WARN_ON(!mlxsw_sp_vport))
return 0;
- }
- err = mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
- if (err) {
- netdev_err(dev, "Failed to set VLAN membership for VID=%d\n",
- vid);
- return err;
- }
+ mlxsw_sp_port_vlan_set(mlxsw_sp_vport, vid, vid, false, false);
- err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
- if (err) {
- netdev_err(dev, "Failed to enable learning for VID=%d\n", vid);
- return err;
- }
+ mlxsw_sp_port_vid_learning_set(mlxsw_sp_vport, vid, true);
/* Drop FID reference. If this was the last reference the
* resources will be freed.
@@ -1048,13 +1026,8 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev,
* transition all active 802.1Q bridge VLANs to use VID to FID
* mappings and set port's mode to VLAN mode.
*/
- if (list_is_singular(&mlxsw_sp_port->vports_list)) {
- err = mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
- if (err) {
- netdev_err(dev, "Failed to set to VLAN mode\n");
- return err;
- }
- }
+ if (list_is_singular(&mlxsw_sp_port->vports_list))
+ mlxsw_sp_port_vlan_mode_trans(mlxsw_sp_port);
mlxsw_sp_port_vport_destroy(mlxsw_sp_vport);
@@ -1149,6 +1122,7 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
bool ingress)
{
const struct tc_action *a;
+ LIST_HEAD(actions);
int err;
if (!tc_single_action(cls->exts)) {
@@ -1156,7 +1130,8 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
return -ENOTSUPP;
}
- tc_for_each_action(a, cls->exts) {
+ tcf_exts_to_list(cls->exts, &actions);
+ list_for_each_entry(a, &actions, list) {
if (!is_tcf_mirred_mirror(a) || protocol != htons(ETH_P_ALL))
return -ENOTSUPP;
@@ -2076,6 +2051,18 @@ static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
return 0;
}
+static int mlxsw_sp_port_pvid_vport_create(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ mlxsw_sp_port->pvid = 1;
+
+ return mlxsw_sp_port_add_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
+static int mlxsw_sp_port_pvid_vport_destroy(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ return mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+}
+
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
bool split, u8 module, u8 width, u8 lane)
{
@@ -2119,6 +2106,13 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
+ err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_swid_set;
+ }
+
err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
@@ -2144,13 +2138,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_system_port_mapping_set;
}
- err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
- if (err) {
- dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
- mlxsw_sp_port->local_port);
- goto err_port_swid_set;
- }
-
err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
@@ -2191,7 +2178,15 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_port_dcb_init;
}
+ err = mlxsw_sp_port_pvid_vport_create(mlxsw_sp_port);
+ if (err) {
+ dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create PVID vPort\n",
+ mlxsw_sp_port->local_port);
+ goto err_port_pvid_vport_create;
+ }
+
mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
+ mlxsw_sp->ports[local_port] = mlxsw_sp_port;
err = register_netdev(dev);
if (err) {
dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
@@ -2208,27 +2203,26 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_core_port_init;
}
- err = mlxsw_sp_port_vlan_init(mlxsw_sp_port);
- if (err)
- goto err_port_vlan_init;
-
- mlxsw_sp->ports[local_port] = mlxsw_sp_port;
return 0;
-err_port_vlan_init:
- mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
err_core_port_init:
unregister_netdev(dev);
err_register_netdev:
+ mlxsw_sp->ports[local_port] = NULL;
+ mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+ mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+err_port_pvid_vport_create:
+ mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
err_port_ets_init:
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_speed_by_width_set:
-err_port_swid_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
+ mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
+err_port_swid_set:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
kfree(mlxsw_sp_port->untagged_vlans);
@@ -2245,12 +2239,12 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
if (!mlxsw_sp_port)
return;
- mlxsw_sp->ports[local_port] = NULL;
mlxsw_core_port_fini(&mlxsw_sp_port->core_port);
unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
- mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
- mlxsw_sp_port_kill_vid(mlxsw_sp_port->dev, 0, 1);
+ mlxsw_sp->ports[local_port] = NULL;
mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
+ mlxsw_sp_port_pvid_vport_destroy(mlxsw_sp_port);
+ mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
free_percpu(mlxsw_sp_port->pcpu_stats);
@@ -2662,6 +2656,26 @@ static const struct mlxsw_rx_listener mlxsw_sp_rx_listener[] = {
{
.func = mlxsw_sp_rx_listener_func,
.local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_MTUERROR,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_TTLERROR,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_LBERROR,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
+ .trap_id = MLXSW_TRAP_ID_OSPF,
+ },
+ {
+ .func = mlxsw_sp_rx_listener_func,
+ .local_port = MLXSW_PORT_DONT_CARE,
.trap_id = MLXSW_TRAP_ID_IP2ME,
},
{
@@ -3311,6 +3325,39 @@ static struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp,
return mlxsw_sp_fid_find(mlxsw_sp, fid);
}
+static enum mlxsw_flood_table_type mlxsw_sp_flood_table_type_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? MLXSW_REG_SFGC_TABLE_TYPE_FID :
+ MLXSW_REG_SFGC_TABLE_TYPE_FID_OFFEST;
+}
+
+static u16 mlxsw_sp_flood_table_index_get(u16 fid)
+{
+ return mlxsw_sp_fid_is_vfid(fid) ? mlxsw_sp_fid_to_vfid(fid) : fid;
+}
+
+static int mlxsw_sp_router_port_flood_set(struct mlxsw_sp *mlxsw_sp, u16 fid,
+ bool set)
+{
+ enum mlxsw_flood_table_type table_type;
+ char *sftr_pl;
+ u16 index;
+ int err;
+
+ sftr_pl = kmalloc(MLXSW_REG_SFTR_LEN, GFP_KERNEL);
+ if (!sftr_pl)
+ return -ENOMEM;
+
+ table_type = mlxsw_sp_flood_table_type_get(fid);
+ index = mlxsw_sp_flood_table_index_get(fid);
+ mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, index, table_type,
+ 1, MLXSW_PORT_ROUTER_PORT, set);
+ err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
+
+ kfree(sftr_pl);
+ return err;
+}
+
static enum mlxsw_reg_ritr_if_type mlxsw_sp_rif_type_get(u16 fid)
{
if (mlxsw_sp_fid_is_vfid(fid))
@@ -3347,10 +3394,14 @@ static int mlxsw_sp_rif_bridge_create(struct mlxsw_sp *mlxsw_sp,
if (rif == MLXSW_SP_RIF_MAX)
return -ERANGE;
- err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+ err = mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, true);
if (err)
return err;
+ err = mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, true);
+ if (err)
+ goto err_rif_bridge_op;
+
err = mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, true);
if (err)
goto err_rif_fdb_op;
@@ -3372,6 +3423,8 @@ err_rif_alloc:
mlxsw_sp_rif_fdb_op(mlxsw_sp, l3_dev->dev_addr, f->fid, false);
err_rif_fdb_op:
mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+err_rif_bridge_op:
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
return err;
}
@@ -3391,6 +3444,8 @@ void mlxsw_sp_rif_bridge_destroy(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_rif_bridge_op(mlxsw_sp, l3_dev, f->fid, rif, false);
+ mlxsw_sp_router_port_flood_set(mlxsw_sp, f->fid, false);
+
netdev_dbg(l3_dev, "RIF=%d destroyed\n", rif);
}
@@ -4487,18 +4542,26 @@ static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
.priority = 10, /* Must be called before FIB notifier block */
};
+static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
+ .notifier_call = mlxsw_sp_router_netevent_event,
+};
+
static int __init mlxsw_sp_module_init(void)
{
int err;
register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
+ register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+
err = mlxsw_core_driver_register(&mlxsw_sp_driver);
if (err)
goto err_core_driver_register;
return 0;
err_core_driver_register:
+ unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
+ unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
return err;
}
@@ -4506,6 +4569,7 @@ err_core_driver_register:
static void __exit mlxsw_sp_module_exit(void)
{
mlxsw_core_driver_unregister(&mlxsw_sp_driver);
+ unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index f69aa37..ac48abe 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -536,8 +536,6 @@ int mlxsw_sp_port_vid_to_fid_set(struct mlxsw_sp_port *mlxsw_sp_port,
u16 vid);
int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
u16 vid_end, bool is_member, bool untagged);
-int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto,
- u16 vid);
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
bool set);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
@@ -589,6 +587,8 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
struct neighbour *n);
void mlxsw_sp_router_neigh_destroy(struct net_device *dev,
struct neighbour *n);
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+ unsigned long event, void *ptr);
int mlxsw_sp_kvdl_alloc(struct mlxsw_sp *mlxsw_sp, unsigned int entry_count);
void mlxsw_sp_kvdl_free(struct mlxsw_sp *mlxsw_sp, int entry_index);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 074cdda..953b214 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -330,7 +330,7 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_cpu_port_sb_cms[] = {
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
- MLXSW_SP_CPU_PORT_SB_CM,
+ MLXSW_SP_SB_CM(MLXSW_SP_BYTES_TO_CELLS(10000), 0, 0),
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
MLXSW_SP_CPU_PORT_SB_CM,
@@ -717,22 +717,18 @@ int mlxsw_sp_sb_tc_pool_bind_set(struct mlxsw_core_port *mlxsw_core_port,
u8 local_port = mlxsw_sp_port->local_port;
u8 pg_buff = tc_index;
enum mlxsw_reg_sbxx_dir dir = pool_type;
- u8 pool = pool_index;
+ u8 pool = pool_get(pool_index);
u32 max_buff;
int err;
+ if (dir != dir_get(pool_index))
+ return -EINVAL;
+
err = mlxsw_sp_sb_threshold_in(mlxsw_sp, pool, dir,
threshold, &max_buff);
if (err)
return err;
- if (pool_type == DEVLINK_SB_POOL_TYPE_EGRESS) {
- if (pool < MLXSW_SP_SB_POOL_COUNT)
- return -EINVAL;
- pool -= MLXSW_SP_SB_POOL_COUNT;
- } else if (pool >= MLXSW_SP_SB_POOL_COUNT) {
- return -EINVAL;
- }
return mlxsw_sp_sb_cm_write(mlxsw_sp, local_port, pg_buff, dir,
0, max_buff, pool);
}
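
The pool_get()/dir_get() helpers used above replace the open-coded range checks: ingress pools occupy indices [0, N) and egress pools [N, 2N), so both the direction and the pool number can be recovered from one devlink pool_index. A sketch with an illustrative N:

	#include <stdio.h>

	#define POOL_COUNT 4	/* illustrative; the driver has its own count */

	enum dir { DIR_INGRESS, DIR_EGRESS };

	static unsigned int pool_get(unsigned int pool_index)
	{
		return pool_index % POOL_COUNT;
	}

	static enum dir dir_get(unsigned int pool_index)
	{
		return pool_index < POOL_COUNT ? DIR_INGRESS : DIR_EGRESS;
	}

	int main(void)
	{
		unsigned int idx = 5;	/* egress pool 1 */

		printf("pool=%u dir=%d\n", pool_get(idx), dir_get(idx));
		return 0;
	}
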
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
index 01cfb75..b6ed7f7 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dcb.c
@@ -341,6 +341,8 @@ static int mlxsw_sp_port_pfc_set(struct mlxsw_sp_port *mlxsw_sp_port,
char pfcc_pl[MLXSW_REG_PFCC_LEN];
mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
+ mlxsw_reg_pfcc_pprx_set(pfcc_pl, mlxsw_sp_port->link.rx_pause);
+ mlxsw_reg_pfcc_pptx_set(pfcc_pl, mlxsw_sp_port->link.tx_pause);
mlxsw_reg_pfcc_prio_pack(pfcc_pl, pfc->pfc_en);
return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
@@ -351,17 +353,17 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
struct ieee_pfc *pfc)
{
struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
+ bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
int err;
- if ((mlxsw_sp_port->link.tx_pause || mlxsw_sp_port->link.rx_pause) &&
- pfc->pfc_en) {
+ if (pause_en && pfc->pfc_en) {
netdev_err(dev, "PAUSE frames already enabled on port\n");
return -EINVAL;
}
err = __mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
mlxsw_sp_port->dcb.ets->prio_tc,
- false, pfc);
+ pause_en, pfc);
if (err) {
netdev_err(dev, "Failed to configure port's headroom for PFC\n");
return err;
@@ -380,7 +382,7 @@ static int mlxsw_sp_dcbnl_ieee_setpfc(struct net_device *dev,
err_port_pfc_set:
__mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu,
- mlxsw_sp_port->dcb.ets->prio_tc, false,
+ mlxsw_sp_port->dcb.ets->prio_tc, pause_en,
mlxsw_sp_port->dcb.pfc);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 81418d6..3f5c51d 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -107,6 +107,7 @@ mlxsw_sp_prefix_usage_clear(struct mlxsw_sp_prefix_usage *prefix_usage,
}
struct mlxsw_sp_fib_key {
+ struct net_device *dev;
unsigned char addr[sizeof(struct in6_addr)];
unsigned char prefix_len;
};
@@ -123,7 +124,7 @@ struct mlxsw_sp_fib_entry {
struct rhash_head ht_node;
struct mlxsw_sp_fib_key key;
enum mlxsw_sp_fib_entry_type type;
- u8 added:1;
+ unsigned int ref_count;
u16 rif; /* used for action local */
struct mlxsw_sp_vr *vr;
struct list_head nexthop_group_node;
@@ -171,13 +172,15 @@ static void mlxsw_sp_fib_entry_remove(struct mlxsw_sp_fib *fib,
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_create(struct mlxsw_sp_fib *fib, const void *addr,
- size_t addr_len, unsigned char prefix_len)
+ size_t addr_len, unsigned char prefix_len,
+ struct net_device *dev)
{
struct mlxsw_sp_fib_entry *fib_entry;
fib_entry = kzalloc(sizeof(*fib_entry), GFP_KERNEL);
if (!fib_entry)
return NULL;
+ fib_entry->key.dev = dev;
memcpy(fib_entry->key.addr, addr, addr_len);
fib_entry->key.prefix_len = prefix_len;
return fib_entry;
@@ -190,10 +193,13 @@ static void mlxsw_sp_fib_entry_destroy(struct mlxsw_sp_fib_entry *fib_entry)
static struct mlxsw_sp_fib_entry *
mlxsw_sp_fib_entry_lookup(struct mlxsw_sp_fib *fib, const void *addr,
- size_t addr_len, unsigned char prefix_len)
+ size_t addr_len, unsigned char prefix_len,
+ struct net_device *dev)
{
- struct mlxsw_sp_fib_key key = {{ 0 } };
+ struct mlxsw_sp_fib_key key;
+ memset(&key, 0, sizeof(key));
+ key.dev = dev;
memcpy(key.addr, addr, addr_len);
key.prefix_len = prefix_len;
return rhashtable_lookup_fast(&fib->ht, &key, mlxsw_sp_fib_ht_params);
@@ -657,7 +663,7 @@ int mlxsw_sp_router_neigh_construct(struct net_device *dev,
return 0;
}
- r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev);
+ r = mlxsw_sp_rif_find_by_dev(mlxsw_sp, n->dev);
if (WARN_ON(!r))
return -EINVAL;
@@ -938,8 +944,8 @@ static void mlxsw_sp_router_neigh_update_hw(struct work_struct *work)
mlxsw_sp_port_dev_put(mlxsw_sp_port);
}
-static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
- unsigned long event, void *ptr)
+int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
+ unsigned long event, void *ptr)
{
struct mlxsw_sp_neigh_entry *neigh_entry;
struct mlxsw_sp_port *mlxsw_sp_port;
@@ -1009,10 +1015,6 @@ static int mlxsw_sp_router_netevent_event(struct notifier_block *unused,
return NOTIFY_DONE;
}
-static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
- .notifier_call = mlxsw_sp_router_netevent_event,
-};
-
static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
{
int err;
@@ -1027,10 +1029,6 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
*/
mlxsw_sp_router_neighs_update_interval_init(mlxsw_sp);
- err = register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
- if (err)
- goto err_register_netevent_notifier;
-
/* Create the delayed works for the activity_update */
INIT_DELAYED_WORK(&mlxsw_sp->router.neighs_update.dw,
mlxsw_sp_router_neighs_update_work);
@@ -1039,17 +1037,12 @@ static int mlxsw_sp_neigh_init(struct mlxsw_sp *mlxsw_sp)
mlxsw_core_schedule_dw(&mlxsw_sp->router.neighs_update.dw, 0);
mlxsw_core_schedule_dw(&mlxsw_sp->router.nexthop_probe_dw, 0);
return 0;
-
-err_register_netevent_notifier:
- rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
- return err;
}
static void mlxsw_sp_neigh_fini(struct mlxsw_sp *mlxsw_sp)
{
cancel_delayed_work_sync(&mlxsw_sp->router.neighs_update.dw);
cancel_delayed_work_sync(&mlxsw_sp->router.nexthop_probe_dw);
- unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
rhashtable_destroy(&mlxsw_sp->router.neigh_ht);
}
@@ -1524,7 +1517,14 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp)
return err;
mlxsw_sp_lpm_init(mlxsw_sp);
mlxsw_sp_vrs_init(mlxsw_sp);
- return mlxsw_sp_neigh_init(mlxsw_sp);
+ err = mlxsw_sp_neigh_init(mlxsw_sp);
+ if (err)
+ goto err_neigh_init;
+ return 0;
+
+err_neigh_init:
+ __mlxsw_sp_router_fini(mlxsw_sp);
+ return err;
}
void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp)
@@ -1626,11 +1626,8 @@ static int mlxsw_sp_fib_entry_op(struct mlxsw_sp *mlxsw_sp,
static int mlxsw_sp_fib_entry_update(struct mlxsw_sp *mlxsw_sp,
struct mlxsw_sp_fib_entry *fib_entry)
{
- enum mlxsw_reg_ralue_op op;
-
- op = !fib_entry->added ? MLXSW_REG_RALUE_OP_WRITE_WRITE :
- MLXSW_REG_RALUE_OP_WRITE_UPDATE;
- return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry, op);
+ return mlxsw_sp_fib_entry_op(mlxsw_sp, fib_entry,
+ MLXSW_REG_RALUE_OP_WRITE_WRITE);
}
static int mlxsw_sp_fib_entry_del(struct mlxsw_sp *mlxsw_sp,
@@ -1651,9 +1648,10 @@ static void mlxsw_sp_router_fib4_add_info_destroy(void const *data)
const struct mlxsw_sp_router_fib4_add_info *info = data;
struct mlxsw_sp_fib_entry *fib_entry = info->fib_entry;
struct mlxsw_sp *mlxsw_sp = info->mlxsw_sp;
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
mlxsw_sp_fib_entry_destroy(fib_entry);
- mlxsw_sp_vr_put(mlxsw_sp, fib_entry->vr);
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
kfree(info);
}
@@ -1694,34 +1692,93 @@ mlxsw_sp_router_fib4_entry_fini(struct mlxsw_sp *mlxsw_sp,
mlxsw_sp_nexthop_group_put(mlxsw_sp, fib_entry);
}
-static int
-mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
- const struct switchdev_obj_ipv4_fib *fib4,
- struct switchdev_trans *trans)
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_get(struct mlxsw_sp *mlxsw_sp,
+ const struct switchdev_obj_ipv4_fib *fib4)
{
- struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
- struct mlxsw_sp_router_fib4_add_info *info;
struct mlxsw_sp_fib_entry *fib_entry;
+ struct fib_info *fi = fib4->fi;
struct mlxsw_sp_vr *vr;
int err;
vr = mlxsw_sp_vr_get(mlxsw_sp, fib4->dst_len, fib4->tb_id,
MLXSW_SP_L3_PROTO_IPV4);
if (IS_ERR(vr))
- return PTR_ERR(vr);
+ return ERR_CAST(vr);
+ fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+ sizeof(fib4->dst),
+ fib4->dst_len, fi->fib_dev);
+ if (fib_entry) {
+ /* Already exists, just take a reference */
+ fib_entry->ref_count++;
+ return fib_entry;
+ }
fib_entry = mlxsw_sp_fib_entry_create(vr->fib, &fib4->dst,
- sizeof(fib4->dst), fib4->dst_len);
+ sizeof(fib4->dst),
+ fib4->dst_len, fi->fib_dev);
if (!fib_entry) {
err = -ENOMEM;
goto err_fib_entry_create;
}
fib_entry->vr = vr;
+ fib_entry->ref_count = 1;
err = mlxsw_sp_router_fib4_entry_init(mlxsw_sp, fib4, fib_entry);
if (err)
goto err_fib4_entry_init;
+ return fib_entry;
+
+err_fib4_entry_init:
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+err_fib_entry_create:
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+ return ERR_PTR(err);
+}
+
+static struct mlxsw_sp_fib_entry *
+mlxsw_sp_fib_entry_find(struct mlxsw_sp *mlxsw_sp,
+ const struct switchdev_obj_ipv4_fib *fib4)
+{
+ struct mlxsw_sp_vr *vr;
+
+ vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
+ if (!vr)
+ return NULL;
+
+ return mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
+ sizeof(fib4->dst), fib4->dst_len,
+ fib4->fi->fib_dev);
+}
+
+void mlxsw_sp_fib_entry_put(struct mlxsw_sp *mlxsw_sp,
+ struct mlxsw_sp_fib_entry *fib_entry)
+{
+ struct mlxsw_sp_vr *vr = fib_entry->vr;
+
+ if (--fib_entry->ref_count == 0) {
+ mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_destroy(fib_entry);
+ }
+ mlxsw_sp_vr_put(mlxsw_sp, vr);
+}
+
+static int
+mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
+ const struct switchdev_obj_ipv4_fib *fib4,
+ struct switchdev_trans *trans)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ struct mlxsw_sp_router_fib4_add_info *info;
+ struct mlxsw_sp_fib_entry *fib_entry;
+ int err;
+
+ fib_entry = mlxsw_sp_fib_entry_get(mlxsw_sp, fib4);
+ if (IS_ERR(fib_entry))
+ return PTR_ERR(fib_entry);
+
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
err = -ENOMEM;
@@ -1735,11 +1792,7 @@ mlxsw_sp_router_fib4_add_prepare(struct mlxsw_sp_port *mlxsw_sp_port,
return 0;
err_alloc_info:
- mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
-err_fib4_entry_init:
- mlxsw_sp_fib_entry_destroy(fib_entry);
-err_fib_entry_create:
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
return err;
}
@@ -1758,11 +1811,14 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
fib_entry = info->fib_entry;
kfree(info);
+ if (fib_entry->ref_count != 1)
+ return 0;
+
vr = fib_entry->vr;
- err = mlxsw_sp_fib_entry_insert(fib_entry->vr->fib, fib_entry);
+ err = mlxsw_sp_fib_entry_insert(vr->fib, fib_entry);
if (err)
goto err_fib_entry_insert;
- err = mlxsw_sp_fib_entry_update(mlxsw_sp, fib_entry);
+ err = mlxsw_sp_fib_entry_update(mlxsw_sp_port->mlxsw_sp, fib_entry);
if (err)
goto err_fib_entry_add;
return 0;
@@ -1770,9 +1826,7 @@ mlxsw_sp_router_fib4_add_commit(struct mlxsw_sp_port *mlxsw_sp_port,
err_fib_entry_add:
mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
err_fib_entry_insert:
- mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
- mlxsw_sp_fib_entry_destroy(fib_entry);
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
return err;
}
@@ -1792,23 +1846,18 @@ int mlxsw_sp_router_fib4_del(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
struct mlxsw_sp_fib_entry *fib_entry;
- struct mlxsw_sp_vr *vr;
- vr = mlxsw_sp_vr_find(mlxsw_sp, fib4->tb_id, MLXSW_SP_L3_PROTO_IPV4);
- if (!vr) {
- dev_warn(mlxsw_sp->bus_info->dev, "Failed to find virtual router for FIB4 entry being removed.\n");
- return -ENOENT;
- }
- fib_entry = mlxsw_sp_fib_entry_lookup(vr->fib, &fib4->dst,
- sizeof(fib4->dst), fib4->dst_len);
+ fib_entry = mlxsw_sp_fib_entry_find(mlxsw_sp, fib4);
if (!fib_entry) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to find FIB4 entry being removed.\n");
return -ENOENT;
}
- mlxsw_sp_fib_entry_del(mlxsw_sp_port->mlxsw_sp, fib_entry);
- mlxsw_sp_fib_entry_remove(vr->fib, fib_entry);
- mlxsw_sp_router_fib4_entry_fini(mlxsw_sp, fib_entry);
- mlxsw_sp_fib_entry_destroy(fib_entry);
- mlxsw_sp_vr_put(mlxsw_sp, vr);
+
+ if (fib_entry->ref_count == 1) {
+ mlxsw_sp_fib_entry_del(mlxsw_sp, fib_entry);
+ mlxsw_sp_fib_entry_remove(fib_entry->vr->fib, fib_entry);
+ }
+
+ mlxsw_sp_fib_entry_put(mlxsw_sp, fib_entry);
return 0;
}
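
The spectrum_router.c rework replaces create/destroy pairs with a get/put discipline: mlxsw_sp_fib_entry_get() returns an existing entry with an extra reference or creates one with ref_count 1, and only the first reference programs the hardware while only the last one tears it down. A minimal userspace sketch of that discipline; a one-slot "table" keeps it short where the driver uses an rhashtable:

	#include <stdio.h>
	#include <stdlib.h>

	struct entry {
		int key;
		unsigned int ref_count;
	};

	static struct entry *table;	/* one slot, single key assumed */

	static struct entry *entry_get(int key)
	{
		if (table && table->key == key) {
			table->ref_count++;	/* already present: just reference */
			return table;
		}
		table = calloc(1, sizeof(*table));
		if (!table)
			return NULL;
		table->key = key;
		table->ref_count = 1;		/* first user programs the HW here */
		return table;
	}

	static void entry_put(struct entry *e)
	{
		if (--e->ref_count == 0) {	/* last user unprograms and frees */
			table = NULL;
			free(e);
		}
	}

	int main(void)
	{
		struct entry *a = entry_get(7);
		struct entry *b = entry_get(7);	/* same route, another user */

		printf("refs=%u\n", a->ref_count);	/* 2 */
		entry_put(b);
		entry_put(a);
		return 0;
	}
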
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index a1ad5e6..7b654c51 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -167,8 +167,8 @@ static int mlxsw_sp_port_attr_stp_state_set(struct mlxsw_sp_port *mlxsw_sp_port,
}
static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 idx_begin, u16 idx_end, bool set,
- bool only_uc)
+ u16 idx_begin, u16 idx_end, bool uc_set,
+ bool bm_set)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
u16 local_port = mlxsw_sp_port->local_port;
@@ -187,28 +187,22 @@ static int __mlxsw_sp_port_flood_set(struct mlxsw_sp_port *mlxsw_sp_port,
return -ENOMEM;
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
- table_type, range, local_port, set);
+ table_type, range, local_port, uc_set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
if (err)
goto buffer_out;
- /* Flooding control allows one to decide whether a given port will
- * flood unicast traffic for which there is no FDB entry.
- */
- if (only_uc)
- goto buffer_out;
-
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_BM, idx_begin,
- table_type, range, local_port, set);
+ table_type, range, local_port, bm_set);
err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
if (err)
goto err_flood_bm_set;
- else
- goto buffer_out;
+
+ goto buffer_out;
err_flood_bm_set:
mlxsw_reg_sftr_pack(sftr_pl, MLXSW_SP_FLOOD_TABLE_UC, idx_begin,
- table_type, range, local_port, !set);
+ table_type, range, local_port, !uc_set);
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sftr), sftr_pl);
buffer_out:
kfree(sftr_pl);
@@ -257,8 +251,7 @@ int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 fid,
* the start of the vFIDs range.
*/
vfid = mlxsw_sp_fid_to_vfid(fid);
- return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set,
- false);
+ return __mlxsw_sp_port_flood_set(mlxsw_sp_vport, vfid, vfid, set, set);
}
static int mlxsw_sp_port_attr_br_flags_set(struct mlxsw_sp_port *mlxsw_sp_port,
@@ -450,6 +443,8 @@ void mlxsw_sp_fid_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *f)
kfree(f);
+ mlxsw_sp_fid_map(mlxsw_sp, fid, false);
+
mlxsw_sp_fid_op(mlxsw_sp, fid, false);
}
@@ -458,6 +453,9 @@ static int __mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
{
struct mlxsw_sp_fid *f;
+ if (test_bit(fid, mlxsw_sp_port->active_vlans))
+ return 0;
+
f = mlxsw_sp_fid_find(mlxsw_sp_port->mlxsw_sp, fid);
if (!f) {
f = mlxsw_sp_fid_create(mlxsw_sp_port->mlxsw_sp, fid);
@@ -515,7 +513,7 @@ static int mlxsw_sp_port_fid_join(struct mlxsw_sp_port *mlxsw_sp_port,
}
err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, fid_begin, fid_end,
- true, false);
+ mlxsw_sp_port->uc_flood, true);
if (err)
goto err_port_flood_set;
@@ -997,13 +995,13 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev,
}
static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
- u16 vid_begin, u16 vid_end, bool init)
+ u16 vid_begin, u16 vid_end)
{
struct net_device *dev = mlxsw_sp_port->dev;
u16 vid, pvid;
int err;
- if (!init && !mlxsw_sp_port->bridged)
+ if (!mlxsw_sp_port->bridged)
return -EINVAL;
err = __mlxsw_sp_port_vlans_set(mlxsw_sp_port, vid_begin, vid_end,
@@ -1014,9 +1012,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
return err;
}
- if (init)
- goto out;
-
pvid = mlxsw_sp_port->pvid;
if (pvid >= vid_begin && pvid <= vid_end) {
err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
@@ -1028,7 +1023,6 @@ static int __mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port_fid_leave(mlxsw_sp_port, vid_begin, vid_end);
-out:
/* Changing activity bits only if HW operation succeeded */
for (vid = vid_begin; vid <= vid_end; vid++)
clear_bit(vid, mlxsw_sp_port->active_vlans);
@@ -1039,8 +1033,8 @@ out:
static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port,
const struct switchdev_obj_port_vlan *vlan)
{
- return __mlxsw_sp_port_vlans_del(mlxsw_sp_port,
- vlan->vid_begin, vlan->vid_end, false);
+ return __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vlan->vid_begin,
+ vlan->vid_end);
}
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
@@ -1048,7 +1042,7 @@ void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port)
u16 vid;
for_each_set_bit(vid, mlxsw_sp_port->active_vlans, VLAN_N_VID)
- __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid, false);
+ __mlxsw_sp_port_vlans_del(mlxsw_sp_port, vid, vid);
}
static int
@@ -1546,32 +1540,6 @@ void mlxsw_sp_switchdev_fini(struct mlxsw_sp *mlxsw_sp)
mlxsw_sp_fdb_fini(mlxsw_sp);
}
-int mlxsw_sp_port_vlan_init(struct mlxsw_sp_port *mlxsw_sp_port)
-{
- struct net_device *dev = mlxsw_sp_port->dev;
- int err;
-
- /* Allow only untagged packets to ingress and tag them internally
- * with VID 1.
- */
- mlxsw_sp_port->pvid = 1;
- err = __mlxsw_sp_port_vlans_del(mlxsw_sp_port, 0, VLAN_N_VID - 1,
- true);
- if (err) {
- netdev_err(dev, "Unable to init VLANs\n");
- return err;
- }
-
- /* Add implicit VLAN interface in the device, so that untagged
- * packets will be classified to the default vFID.
- */
- err = mlxsw_sp_port_add_vid(dev, 0, 1);
- if (err)
- netdev_err(dev, "Failed to configure default vFID\n");
-
- return err;
-}
-
void mlxsw_sp_port_switchdev_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
mlxsw_sp_port->dev->switchdev_ops = &mlxsw_sp_port_switchdev_ops;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 470d769..ed8e301 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -56,6 +56,10 @@ enum {
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
MLXSW_TRAP_ID_ARPBC = 0x50,
MLXSW_TRAP_ID_ARPUC = 0x51,
+ MLXSW_TRAP_ID_MTUERROR = 0x52,
+ MLXSW_TRAP_ID_TTLERROR = 0x53,
+ MLXSW_TRAP_ID_LBERROR = 0x54,
+ MLXSW_TRAP_ID_OSPF = 0x55,
MLXSW_TRAP_ID_IP2ME = 0x5F,
MLXSW_TRAP_ID_RTR_INGRESS0 = 0x70,
MLXSW_TRAP_ID_HOST_MISS_IPV4 = 0x90,
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 88678c1..39dadfc 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -41,7 +41,6 @@
* Chris Telfer <chris.telfer@netronome.com>
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -1441,10 +1440,6 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
nfp_net_set_hash(nn->netdev, skb, rxd);
- /* Pad small frames to minimum */
- if (skb_put_padto(skb, 60))
- break;
-
/* Stats update */
u64_stats_update_begin(&r_vec->rx_sync);
r_vec->rx_pkts++;
@@ -2049,12 +2044,16 @@ static int nfp_net_netdev_open(struct net_device *netdev)
nn->rx_rings = kcalloc(nn->num_rx_rings, sizeof(*nn->rx_rings),
GFP_KERNEL);
- if (!nn->rx_rings)
+ if (!nn->rx_rings) {
+ err = -ENOMEM;
goto err_free_lsc;
+ }
nn->tx_rings = kcalloc(nn->num_tx_rings, sizeof(*nn->tx_rings),
GFP_KERNEL);
- if (!nn->tx_rings)
+ if (!nn->tx_rings) {
+ err = -ENOMEM;
goto err_free_rx_rings;
+ }
for (r = 0; r < nn->num_r_vecs; r++) {
err = nfp_net_prepare_vector(nn, &nn->r_vecs[r], r);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
index 7d7933d..4c98972 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c
@@ -40,7 +40,6 @@
* Brad Petrus <brad.petrus@netronome.com>
*/
-#include <linux/version.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
index 37abef0..f7062cb 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c
@@ -38,7 +38,6 @@
* Rolf Neugebauer <rolf.neugebauer@netronome.com>
*/
-#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -134,7 +133,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
nfp_net_get_fw_version(&fw_ver, ctrl_bar);
- if (fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
+ if (fw_ver.resv || fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
fw_ver.resv, fw_ver.class, fw_ver.major, fw_ver.minor);
err = -EINVAL;
@@ -142,9 +141,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev,
}
/* Determine stride */
- if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 0) ||
- nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1) ||
- nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0x12, 0x48)) {
+ if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
stride = 2;
tx_bar_no = NFP_NET_Q0_BAR;
rx_bar_no = NFP_NET_Q1_BAR;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 4d4ecba..8e13ec8 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -475,14 +475,6 @@ static void __lpc_get_mac(struct netdata_local *pldat, u8 *mac)
mac[5] = tmp >> 8;
}
-static void __lpc_eth_clock_enable(struct netdata_local *pldat, bool enable)
-{
- if (enable)
- clk_prepare_enable(pldat->clk);
- else
- clk_disable_unprepare(pldat->clk);
-}
-
static void __lpc_params_setup(struct netdata_local *pldat)
{
u32 tmp;
@@ -1056,7 +1048,7 @@ static int lpc_eth_close(struct net_device *ndev)
writel(0, LPC_ENET_MAC2(pldat->net_base));
spin_unlock_irqrestore(&pldat->lock, flags);
- __lpc_eth_clock_enable(pldat, false);
+ clk_disable_unprepare(pldat->clk);
return 0;
}
@@ -1197,11 +1189,14 @@ static int lpc_eth_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
static int lpc_eth_open(struct net_device *ndev)
{
struct netdata_local *pldat = netdev_priv(ndev);
+ int ret;
if (netif_msg_ifup(pldat))
dev_dbg(&pldat->pdev->dev, "enabling %s\n", ndev->name);
- __lpc_eth_clock_enable(pldat, true);
+ ret = clk_prepare_enable(pldat->clk);
+ if (ret)
+ return ret;
/* Suspended PHY makes LPC ethernet core block, so resume now */
phy_resume(ndev->phydev);
@@ -1320,7 +1315,9 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
}
/* Enable network clock */
- __lpc_eth_clock_enable(pldat, true);
+ ret = clk_prepare_enable(pldat->clk);
+ if (ret)
+ goto err_out_clk_put;
/* Map IO space */
pldat->net_base = ioremap(res->start, resource_size(res));
@@ -1454,6 +1451,7 @@ err_out_iounmap:
iounmap(pldat->net_base);
err_out_disable_clocks:
clk_disable_unprepare(pldat->clk);
+err_out_clk_put:
clk_put(pldat->clk);
err_out_free_dev:
free_netdev(ndev);
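[Editor's note] clk_prepare_enable() can fail, which the removed bool-taking wrapper silently ignored; the patch checks the return value in both open() and probe() and adds the err_out_clk_put unwind label. A minimal sketch of the checked bring-up, with illustrative names:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static int ex_clock_up(struct device *dev, struct clk **out)
{
	struct clk *clk;
	int ret;

	clk = clk_get(dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);	/* may fail: must be checked */
	if (ret) {
		clk_put(clk);		/* unwind in reverse order */
		return ret;
	}

	*out = clk;
	return 0;
}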
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 35e5377..45ab746 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -561,9 +561,18 @@ struct qed_dev {
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
u32 concrete_fid)
{
+ u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
+ u8 vf_valid = GET_FIELD(concrete_fid,
+ PXP_CONCRETE_FID_VFVALID);
+ u8 sw_fid;
- return pfid;
+ if (vf_valid)
+ sw_fid = vfid + MAX_NUM_PFS;
+ else
+ sw_fid = pfid;
+
+ return sw_fid;
}
#define PURE_LB_TC 8
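[Editor's note] qed_concrete_to_sw_fid() previously collapsed every concrete FID onto its PF, making VF flows indistinguishable; the new code places VFs after all PFs in the software FID space. Restated as a hedged sketch (EX_MAX_NUM_PFS stands in for the driver's MAX_NUM_PFS):

#define EX_MAX_NUM_PFS 16	/* stand-in for the real MAX_NUM_PFS */

static inline u8 ex_sw_fid(bool vf_valid, u8 vfid, u8 pfid)
{
	/* VFs occupy [EX_MAX_NUM_PFS, ...); PFs keep their own ids */
	return vf_valid ? vfid + EX_MAX_NUM_PFS : pfid;
}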
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index d0dc28f..3656d2f 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -19,6 +19,7 @@
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_sp.h"
+#include "qed_sriov.h"
#ifdef CONFIG_DCB
#include <linux/qed/qed_eth_if.h>
#endif
@@ -52,40 +53,94 @@ static bool qed_dcbx_app_ethtype(u32 app_info_bitmap)
DCBX_APP_SF_ETHTYPE);
}
+static bool qed_dcbx_ieee_app_ethtype(u32 app_info_bitmap)
+{
+ u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(mfw_val == DCBX_APP_SF_IEEE_ETHTYPE);
+}
+
static bool qed_dcbx_app_port(u32 app_info_bitmap)
{
return !!(QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) ==
DCBX_APP_SF_PORT);
}
-static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_ieee_app_port(u32 app_info_bitmap, u8 type)
{
- return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
- proto_id == QED_ETH_TYPE_DEFAULT);
+ u8 mfw_val = QED_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF_IEEE);
+
+ /* Old MFW */
+ if (mfw_val == DCBX_APP_SF_IEEE_RESERVED)
+ return qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(mfw_val == type || mfw_val == DCBX_APP_SF_IEEE_TCP_UDP_PORT);
}
-static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return !!(qed_dcbx_app_port(app_info_bitmap) &&
- proto_id == QED_TCP_PORT_ISCSI);
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_DEFAULT));
}
-static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_iscsi_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
- proto_id == QED_ETH_TYPE_FCOE);
+ bool port;
+
+ if (ieee)
+ port = qed_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_TCP_PORT);
+ else
+ port = qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == QED_TCP_PORT_ISCSI));
}
-static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_fcoe_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return !!(qed_dcbx_app_ethtype(app_info_bitmap) &&
- proto_id == QED_ETH_TYPE_ROCE);
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_FCOE));
}
-static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id)
+static bool qed_dcbx_roce_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
{
- return !!(qed_dcbx_app_port(app_info_bitmap) &&
- proto_id == QED_UDP_PORT_TYPE_ROCE_V2);
+ bool ethtype;
+
+ if (ieee)
+ ethtype = qed_dcbx_ieee_app_ethtype(app_info_bitmap);
+ else
+ ethtype = qed_dcbx_app_ethtype(app_info_bitmap);
+
+ return !!(ethtype && (proto_id == QED_ETH_TYPE_ROCE));
+}
+
+static bool qed_dcbx_roce_v2_tlv(u32 app_info_bitmap, u16 proto_id, bool ieee)
+{
+ bool port;
+
+ if (ieee)
+ port = qed_dcbx_ieee_app_port(app_info_bitmap,
+ DCBX_APP_SF_IEEE_UDP_PORT);
+ else
+ port = qed_dcbx_app_port(app_info_bitmap);
+
+ return !!(port && (proto_id == QED_UDP_PORT_TYPE_ROCE_V2));
}
static void
@@ -164,17 +219,17 @@ qed_dcbx_update_app_info(struct qed_dcbx_results *p_data,
static bool
qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
u32 app_prio_bitmap,
- u16 id, enum dcbx_protocol_type *type)
+ u16 id, enum dcbx_protocol_type *type, bool ieee)
{
- if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id)) {
+ if (qed_dcbx_fcoe_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_FCOE;
- } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id)) {
+ } else if (qed_dcbx_roce_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ROCE;
- } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id)) {
+ } else if (qed_dcbx_iscsi_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ISCSI;
- } else if (qed_dcbx_default_tlv(app_prio_bitmap, id)) {
+ } else if (qed_dcbx_default_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ETH;
- } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id)) {
+ } else if (qed_dcbx_roce_v2_tlv(app_prio_bitmap, id, ieee)) {
*type = DCBX_PROTOCOL_ROCE_V2;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
@@ -194,17 +249,18 @@ static int
qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
struct qed_dcbx_results *p_data,
struct dcbx_app_priority_entry *p_tbl,
- u32 pri_tc_tbl, int count, bool dcbx_enabled)
+ u32 pri_tc_tbl, int count, u8 dcbx_version)
{
u8 tc, priority_map;
enum dcbx_protocol_type type;
+ bool enable, ieee;
u16 protocol_id;
int priority;
- bool enable;
int i;
DP_VERBOSE(p_hwfn, QED_MSG_DCB, "Num APP entries = %d\n", count);
+ ieee = (dcbx_version == DCBX_CONFIG_VERSION_IEEE);
/* Parse APP TLV */
for (i = 0; i < count; i++) {
protocol_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
@@ -219,7 +275,7 @@ qed_dcbx_process_tlv(struct qed_hwfn *p_hwfn,
tc = QED_DCBX_PRIO2TC(pri_tc_tbl, priority);
if (qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
- protocol_id, &type)) {
+ protocol_id, &type, ieee)) {
/* ETH always have the enable bit reset, as it gets
* vlan information per packet. For other protocols,
* should be set according to the dcbx_enabled
@@ -275,15 +331,12 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
struct dcbx_ets_feature *p_ets;
struct qed_hw_info *p_info;
u32 pri_tc_tbl, flags;
- bool dcbx_enabled;
+ u8 dcbx_version;
int num_entries;
int rc = 0;
- /* If DCBx version is non zero, then negotiation was
- * successfuly performed
- */
flags = p_hwfn->p_dcbx_info->operational.flags;
- dcbx_enabled = !!QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
+ dcbx_version = QED_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION);
p_app = &p_hwfn->p_dcbx_info->operational.features.app;
p_tbl = p_app->app_pri_tbl;
@@ -295,13 +348,13 @@ static int qed_dcbx_process_mib_info(struct qed_hwfn *p_hwfn)
num_entries = QED_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES);
rc = qed_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl,
- num_entries, dcbx_enabled);
+ num_entries, dcbx_version);
if (rc)
return rc;
p_info->num_tc = QED_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS);
data.pf_id = p_hwfn->rel_pf_id;
- data.dcbx_enabled = dcbx_enabled;
+ data.dcbx_enabled = !!dcbx_version;
qed_dcbx_dp_protocol(p_hwfn, &data);
@@ -400,7 +453,7 @@ static void
qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
struct dcbx_app_priority_feature *p_app,
struct dcbx_app_priority_entry *p_tbl,
- struct qed_dcbx_params *p_params)
+ struct qed_dcbx_params *p_params, bool ieee)
{
struct qed_app_entry *entry;
u8 pri_map;
@@ -414,15 +467,46 @@ qed_dcbx_get_app_data(struct qed_hwfn *p_hwfn,
DCBX_APP_NUM_ENTRIES);
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
entry = &p_params->app_entry[i];
- entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
- DCBX_APP_SF));
+ if (ieee) {
+ u8 sf_ieee;
+ u32 val;
+
+ sf_ieee = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF_IEEE);
+ switch (sf_ieee) {
+ case DCBX_APP_SF_IEEE_RESERVED:
+ /* Old MFW */
+ val = QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF);
+ entry->sf_ieee = val ?
+ QED_DCBX_SF_IEEE_TCP_UDP_PORT :
+ QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_ETHTYPE:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_ETHTYPE;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_UDP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_UDP_PORT;
+ break;
+ case DCBX_APP_SF_IEEE_TCP_UDP_PORT:
+ entry->sf_ieee = QED_DCBX_SF_IEEE_TCP_UDP_PORT;
+ break;
+ }
+ } else {
+ entry->ethtype = !(QED_MFW_GET_FIELD(p_tbl[i].entry,
+ DCBX_APP_SF));
+ }
+
pri_map = QED_MFW_GET_FIELD(p_tbl[i].entry, DCBX_APP_PRI_MAP);
entry->prio = ffs(pri_map) - 1;
entry->proto_id = QED_MFW_GET_FIELD(p_tbl[i].entry,
DCBX_APP_PROTOCOL_ID);
qed_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry,
entry->proto_id,
- &entry->proto_type);
+ &entry->proto_type, ieee);
}
DP_VERBOSE(p_hwfn, QED_MSG_DCB,
@@ -483,7 +567,7 @@ qed_dcbx_get_ets_data(struct qed_hwfn *p_hwfn,
bw_map[1] = be32_to_cpu(p_ets->tc_bw_tbl[1]);
tsa_map[0] = be32_to_cpu(p_ets->tc_tsa_tbl[0]);
tsa_map[1] = be32_to_cpu(p_ets->tc_tsa_tbl[1]);
- pri_map = be32_to_cpu(p_ets->pri_tc_tbl[0]);
+ pri_map = p_ets->pri_tc_tbl[0];
for (i = 0; i < QED_MAX_PFC_PRIORITIES; i++) {
p_params->ets_tc_bw_tbl[i] = ((u8 *)bw_map)[i];
p_params->ets_tc_tsa_tbl[i] = ((u8 *)tsa_map)[i];
@@ -500,9 +584,9 @@ qed_dcbx_get_common_params(struct qed_hwfn *p_hwfn,
struct dcbx_app_priority_feature *p_app,
struct dcbx_app_priority_entry *p_tbl,
struct dcbx_ets_feature *p_ets,
- u32 pfc, struct qed_dcbx_params *p_params)
+ u32 pfc, struct qed_dcbx_params *p_params, bool ieee)
{
- qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params);
+ qed_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params, ieee);
qed_dcbx_get_ets_data(p_hwfn, p_ets, p_params);
qed_dcbx_get_pfc_data(p_hwfn, pfc, p_params);
}
@@ -516,7 +600,7 @@ qed_dcbx_get_local_params(struct qed_hwfn *p_hwfn,
p_feat = &p_hwfn->p_dcbx_info->local_admin.features;
qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
p_feat->app.app_pri_tbl, &p_feat->ets,
- p_feat->pfc, &params->local.params);
+ p_feat->pfc, &params->local.params, false);
params->local.valid = true;
}
@@ -529,7 +613,7 @@ qed_dcbx_get_remote_params(struct qed_hwfn *p_hwfn,
p_feat = &p_hwfn->p_dcbx_info->remote.features;
qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
p_feat->app.app_pri_tbl, &p_feat->ets,
- p_feat->pfc, &params->remote.params);
+ p_feat->pfc, &params->remote.params, false);
params->remote.valid = true;
}
@@ -574,7 +658,8 @@ qed_dcbx_get_operational_params(struct qed_hwfn *p_hwfn,
qed_dcbx_get_common_params(p_hwfn, &p_feat->app,
p_feat->app.app_pri_tbl, &p_feat->ets,
- p_feat->pfc, &params->operational.params);
+ p_feat->pfc, &params->operational.params,
+ p_operational->ieee);
qed_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, p_results);
err = QED_MFW_GET_FIELD(p_feat->app.flags, DCBX_APP_ERROR);
p_operational->err = err;
@@ -861,6 +946,9 @@ static int qed_dcbx_query_params(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt;
int rc;
+ if (IS_VF(p_hwfn->cdev))
+ return -EINVAL;
+
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt)
return -EBUSY;
@@ -900,6 +988,7 @@ qed_dcbx_set_pfc_data(struct qed_hwfn *p_hwfn,
if (p_params->pfc.prio[i])
pfc_map |= BIT(i);
+ *pfc &= ~DCBX_PFC_PRI_EN_BITMAP_MASK;
*pfc |= (pfc_map << DCBX_PFC_PRI_EN_BITMAP_SHIFT);
DP_VERBOSE(p_hwfn, QED_MSG_DCB, "pfc = 0x%x\n", *pfc);
@@ -944,7 +1033,6 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
val = (((u32)p_params->ets_pri_tc_tbl[i]) << ((7 - i) * 4));
p_ets->pri_tc_tbl[0] |= val;
}
- p_ets->pri_tc_tbl[0] = cpu_to_be32(p_ets->pri_tc_tbl[0]);
for (i = 0; i < 2; i++) {
p_ets->tc_bw_tbl[i] = cpu_to_be32(p_ets->tc_bw_tbl[i]);
p_ets->tc_tsa_tbl[i] = cpu_to_be32(p_ets->tc_tsa_tbl[i]);
@@ -954,7 +1042,7 @@ qed_dcbx_set_ets_data(struct qed_hwfn *p_hwfn,
static void
qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
struct dcbx_app_priority_feature *p_app,
- struct qed_dcbx_params *p_params)
+ struct qed_dcbx_params *p_params, bool ieee)
{
u32 *entry;
int i;
@@ -975,12 +1063,45 @@ qed_dcbx_set_app_data(struct qed_hwfn *p_hwfn,
for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
entry = &p_app->app_pri_tbl[i].entry;
- *entry &= ~DCBX_APP_SF_MASK;
- if (p_params->app_entry[i].ethtype)
- *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
- DCBX_APP_SF_SHIFT);
- else
- *entry |= ((u32)DCBX_APP_SF_PORT << DCBX_APP_SF_SHIFT);
+ *entry = 0;
+ if (ieee) {
+ *entry &= ~(DCBX_APP_SF_IEEE_MASK | DCBX_APP_SF_MASK);
+ switch (p_params->app_entry[i].sf_ieee) {
+ case QED_DCBX_SF_IEEE_ETHTYPE:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_ETHTYPE <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_TCP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_UDP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ case QED_DCBX_SF_IEEE_TCP_UDP_PORT:
+ *entry |= ((u32)DCBX_APP_SF_IEEE_TCP_UDP_PORT <<
+ DCBX_APP_SF_IEEE_SHIFT);
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ break;
+ }
+ } else {
+ *entry &= ~DCBX_APP_SF_MASK;
+ if (p_params->app_entry[i].ethtype)
+ *entry |= ((u32)DCBX_APP_SF_ETHTYPE <<
+ DCBX_APP_SF_SHIFT);
+ else
+ *entry |= ((u32)DCBX_APP_SF_PORT <<
+ DCBX_APP_SF_SHIFT);
+ }
+
*entry &= ~DCBX_APP_PROTOCOL_ID_MASK;
*entry |= ((u32)p_params->app_entry[i].proto_id <<
DCBX_APP_PROTOCOL_ID_SHIFT);
@@ -995,15 +1116,19 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
struct dcbx_local_params *local_admin,
struct qed_dcbx_set *params)
{
+ bool ieee = false;
+
local_admin->flags = 0;
memcpy(&local_admin->features,
&p_hwfn->p_dcbx_info->operational.features,
sizeof(local_admin->features));
- if (params->enabled)
+ if (params->enabled) {
local_admin->config = params->ver_num;
- else
+ ieee = !!(params->ver_num & DCBX_CONFIG_VERSION_IEEE);
+ } else {
local_admin->config = DCBX_CONFIG_VERSION_DISABLED;
+ }
if (params->override_flags & QED_DCBX_OVERRIDE_PFC_CFG)
qed_dcbx_set_pfc_data(p_hwfn, &local_admin->features.pfc,
@@ -1015,7 +1140,7 @@ qed_dcbx_set_local_params(struct qed_hwfn *p_hwfn,
if (params->override_flags & QED_DCBX_OVERRIDE_APP_CFG)
qed_dcbx_set_app_data(p_hwfn, &local_admin->features.app,
- &params->config.params);
+ &params->config.params, ieee);
}
int qed_dcbx_config_params(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
@@ -1064,7 +1189,7 @@ int qed_dcbx_get_config_params(struct qed_hwfn *p_hwfn,
return 0;
}
- dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
if (!dcbx_info) {
DP_ERR(p_hwfn, "Failed to allocate struct qed_dcbx_info\n");
return -ENOMEM;
@@ -1101,7 +1226,7 @@ static struct qed_dcbx_get *qed_dcbnl_get_dcbx(struct qed_hwfn *hwfn,
{
struct qed_dcbx_get *dcbx_info;
- dcbx_info = kmalloc(sizeof(*dcbx_info), GFP_KERNEL);
+ dcbx_info = kzalloc(sizeof(*dcbx_info), GFP_KERNEL);
if (!dcbx_info) {
DP_ERR(hwfn->cdev, "Failed to allocate memory for dcbx_info\n");
return NULL;
@@ -1596,8 +1721,10 @@ static int qed_dcbnl_setapp(struct qed_dev *cdev,
if ((entry->ethtype == ethtype) && (entry->proto_id == idval))
break;
/* First empty slot */
- if (!entry->proto_id)
+ if (!entry->proto_id) {
+ dcbx_set.config.params.num_app_entries++;
break;
+ }
}
if (i == QED_DCBX_MAX_APP_PROTOCOL) {
@@ -2117,8 +2244,10 @@ int qed_dcbnl_ieee_setapp(struct qed_dev *cdev, struct dcb_app *app)
(entry->proto_id == app->protocol))
break;
/* First empty slot */
- if (!entry->proto_id)
+ if (!entry->proto_id) {
+ dcbx_set.config.params.num_app_entries++;
break;
+ }
}
if (i == QED_DCBX_MAX_APP_PROTOCOL) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index 5927840..6f9d3b8 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -6850,6 +6850,14 @@ struct dcbx_app_priority_entry {
#define DCBX_APP_SF_SHIFT 8
#define DCBX_APP_SF_ETHTYPE 0
#define DCBX_APP_SF_PORT 1
+#define DCBX_APP_SF_IEEE_MASK 0x0000f000
+#define DCBX_APP_SF_IEEE_SHIFT 12
+#define DCBX_APP_SF_IEEE_RESERVED 0
+#define DCBX_APP_SF_IEEE_ETHTYPE 1
+#define DCBX_APP_SF_IEEE_TCP_PORT 2
+#define DCBX_APP_SF_IEEE_UDP_PORT 3
+#define DCBX_APP_SF_IEEE_TCP_UDP_PORT 4
+
#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000
#define DCBX_APP_PROTOCOL_ID_SHIFT 16
};
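[Editor's note] The new DCBX_APP_SF_IEEE_* values live in a 4-bit field at bit 12 of each app-priority entry, read with the same mask/shift idiom the qed code wraps in QED_MFW_GET_FIELD(). A self-contained sketch of that idiom, with placeholder names:

#define EX_SF_IEEE_MASK		0x0000f000
#define EX_SF_IEEE_SHIFT	12

/* Mirror of the QED_MFW_GET_FIELD() pattern: mask, then shift down. */
static inline u32 ex_get_sf_ieee(u32 entry)
{
	return (entry & EX_SF_IEEE_MASK) >> EX_SF_IEEE_SHIFT;
}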
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index a240f26..f776a77 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1153,8 +1153,8 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
p_drv_version = &union_data.drv_version;
p_drv_version->version = p_ver->version;
- for (i = 0; i < MCP_DRV_VER_STR_SIZE - 1; i += 4) {
- val = cpu_to_be32(p_ver->name[i]);
+ for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
+ val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
*(__be32 *)&p_drv_version->name[i * sizeof(u32)] = val;
}
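[Editor's note] The old loop passed a single char of the name to cpu_to_be32() per 4-byte group; the fix converts whole 32-bit words of the version string. A hedged sketch (EX_STR_SIZE replaces MCP_DRV_VER_STR_SIZE):

#include <linux/kernel.h>

#define EX_STR_SIZE 32	/* stand-in for MCP_DRV_VER_STR_SIZE */

static void ex_copy_drv_version(const u8 *name, u8 *dst)
{
	__be32 val;
	int i;

	/* copy word-by-word, converting each whole word to big endian */
	for (i = 0; i < (EX_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*(const u32 *)&name[i * sizeof(u32)]);
		*(__be32 *)&dst[i * sizeof(u32)] = val;
	}
}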
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index e4bd02e..9544e4c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -722,11 +722,14 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb,
txq->tx_db.data.bd_prod =
cpu_to_le16(qed_chain_get_prod_idx(&txq->tx_pbl));
- if (!skb->xmit_more || netif_tx_queue_stopped(netdev_txq))
+ if (!skb->xmit_more || netif_xmit_stopped(netdev_txq))
qede_update_tx_producer(txq);
if (unlikely(qed_chain_get_elem_left(&txq->tx_pbl)
< (MAX_SKB_FRAGS + 1))) {
+ if (skb->xmit_more)
+ qede_update_tx_producer(txq);
+
netif_tx_stop_queue(netdev_txq);
DP_VERBOSE(edev, NETIF_MSG_TX_QUEUED,
"Stop queue was called\n");
@@ -2517,7 +2520,8 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
edev->ops->register_ops(cdev, &qede_ll_ops, edev);
#ifdef CONFIG_DCB
- qede_set_dcbnl_ops(edev->ndev);
+ if (!IS_VF(edev))
+ qede_set_dcbnl_ops(edev->ndev);
#endif
INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
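[Editor's note] Two fixes in qede_start_xmit(): netif_xmit_stopped() also covers the driver-XOFF state, and the doorbell deferred by xmit_more must be rung before stopping the queue so the last pending packet is not stranded. A simplified sketch of the rule, with hypothetical ex_* helpers:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct ex_txq { int dummy; };

static void ex_update_tx_producer(struct ex_txq *txq)
{
	/* write the producer index / ring the doorbell here */
}

static void ex_tx_kick(struct sk_buff *skb, struct netdev_queue *ndq,
		       struct ex_txq *txq, int free_bds)
{
	/* defer the doorbell while more frames are coming ... */
	if (!skb->xmit_more || netif_xmit_stopped(ndq))
		ex_update_tx_producer(txq);

	/* ... but never stop the queue with an unrung doorbell */
	if (unlikely(free_bds < MAX_SKB_FRAGS + 1)) {
		if (skb->xmit_more)
			ex_update_tx_producer(txq);
		netif_tx_stop_queue(ndq);
	}
}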
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index fd973f4..49bad00 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 64
-#define QLCNIC_LINUX_VERSIONID "5.3.64"
+#define _QLCNIC_LINUX_SUBVERSION 65
+#define QLCNIC_LINUX_VERSIONID "5.3.65"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 87c642d..fedd736 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -102,7 +102,6 @@
#define QLCNIC_RESPONSE_DESC 0x05
#define QLCNIC_LRO_DESC 0x12
-#define QLCNIC_TX_POLL_BUDGET 128
#define QLCNIC_TCP_HDR_SIZE 20
#define QLCNIC_TCP_TS_OPTION_SIZE 12
#define QLCNIC_FETCH_RING_ID(handle) ((handle) >> 63)
@@ -2008,7 +2007,6 @@ static int qlcnic_83xx_msix_tx_poll(struct napi_struct *napi, int budget)
struct qlcnic_host_tx_ring *tx_ring;
struct qlcnic_adapter *adapter;
- budget = QLCNIC_TX_POLL_BUDGET;
tx_ring = container_of(napi, struct qlcnic_host_tx_ring, napi);
adapter = tx_ring->adapter;
work_done = qlcnic_process_cmd_ring(adapter, tx_ring, budget);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 017d8c2c..24061b9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -156,10 +156,8 @@ struct qlcnic_vf_info {
spinlock_t vlan_list_lock; /* Lock for VLAN list */
};
-struct qlcnic_async_work_list {
+struct qlcnic_async_cmd {
struct list_head list;
- struct work_struct work;
- void *ptr;
struct qlcnic_cmd_args *cmd;
};
@@ -168,7 +166,10 @@ struct qlcnic_back_channel {
struct workqueue_struct *bc_trans_wq;
struct workqueue_struct *bc_async_wq;
struct workqueue_struct *bc_flr_wq;
- struct list_head async_list;
+ struct qlcnic_adapter *adapter;
+ struct list_head async_cmd_list;
+ struct work_struct vf_async_work;
+ spinlock_t queue_lock; /* async_cmd_list queue lock */
};
struct qlcnic_sriov {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 7327b72..d710705 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -29,6 +29,7 @@
#define QLC_83XX_VF_RESET_FAIL_THRESH 8
#define QLC_BC_CMD_MAX_RETRY_CNT 5
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work);
static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
@@ -177,7 +178,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
}
bc->bc_async_wq = wq;
- INIT_LIST_HEAD(&bc->async_list);
+ INIT_LIST_HEAD(&bc->async_cmd_list);
+ INIT_WORK(&bc->vf_async_work, qlcnic_sriov_handle_async_issue_cmd);
+ spin_lock_init(&bc->queue_lock);
+ bc->adapter = adapter;
for (i = 0; i < num_vfs; i++) {
vf = &sriov->vf_info[i];
@@ -1517,17 +1521,21 @@ static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac,
void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
{
- struct list_head *head = &bc->async_list;
- struct qlcnic_async_work_list *entry;
+ struct list_head *head = &bc->async_cmd_list;
+ struct qlcnic_async_cmd *entry;
flush_workqueue(bc->bc_async_wq);
+ cancel_work_sync(&bc->vf_async_work);
+
+ spin_lock(&bc->queue_lock);
while (!list_empty(head)) {
- entry = list_entry(head->next, struct qlcnic_async_work_list,
+ entry = list_entry(head->next, struct qlcnic_async_cmd,
list);
- cancel_work_sync(&entry->work);
list_del(&entry->list);
+ kfree(entry->cmd);
kfree(entry);
}
+ spin_unlock(&bc->queue_lock);
}
void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
@@ -1587,57 +1595,64 @@ void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
{
- struct qlcnic_async_work_list *entry;
- struct qlcnic_adapter *adapter;
+ struct qlcnic_async_cmd *entry, *tmp;
+ struct qlcnic_back_channel *bc;
struct qlcnic_cmd_args *cmd;
+ struct list_head *head;
+ LIST_HEAD(del_list);
+
+ bc = container_of(work, struct qlcnic_back_channel, vf_async_work);
+ head = &bc->async_cmd_list;
+
+ spin_lock(&bc->queue_lock);
+ list_splice_init(head, &del_list);
+ spin_unlock(&bc->queue_lock);
+
+ list_for_each_entry_safe(entry, tmp, &del_list, list) {
+ list_del(&entry->list);
+ cmd = entry->cmd;
+ __qlcnic_sriov_issue_cmd(bc->adapter, cmd);
+ kfree(entry);
+ }
+
+ if (!list_empty(head))
+ queue_work(bc->bc_async_wq, &bc->vf_async_work);
- entry = container_of(work, struct qlcnic_async_work_list, work);
- adapter = entry->ptr;
- cmd = entry->cmd;
- __qlcnic_sriov_issue_cmd(adapter, cmd);
return;
}
-static struct qlcnic_async_work_list *
-qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
+static struct qlcnic_async_cmd *
+qlcnic_sriov_alloc_async_cmd(struct qlcnic_back_channel *bc,
+ struct qlcnic_cmd_args *cmd)
{
- struct list_head *node;
- struct qlcnic_async_work_list *entry = NULL;
- u8 empty = 0;
+ struct qlcnic_async_cmd *entry = NULL;
- list_for_each(node, &bc->async_list) {
- entry = list_entry(node, struct qlcnic_async_work_list, list);
- if (!work_pending(&entry->work)) {
- empty = 1;
- break;
- }
- }
+ entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
+ if (!entry)
+ return NULL;
- if (!empty) {
- entry = kzalloc(sizeof(struct qlcnic_async_work_list),
- GFP_ATOMIC);
- if (entry == NULL)
- return NULL;
- list_add_tail(&entry->list, &bc->async_list);
- }
+ entry->cmd = cmd;
+
+ spin_lock(&bc->queue_lock);
+ list_add_tail(&entry->list, &bc->async_cmd_list);
+ spin_unlock(&bc->queue_lock);
return entry;
}
static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
- work_func_t func, void *data,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_async_work_list *entry = NULL;
+ struct qlcnic_async_cmd *entry = NULL;
- entry = qlcnic_sriov_get_free_node_async_work(bc);
- if (!entry)
+ entry = qlcnic_sriov_alloc_async_cmd(bc, cmd);
+ if (!entry) {
+ qlcnic_free_mbx_args(cmd);
+ kfree(cmd);
return;
+ }
- entry->ptr = data;
- entry->cmd = cmd;
- INIT_WORK(&entry->work, func);
- queue_work(bc->bc_async_wq, &entry->work);
+ queue_work(bc->bc_async_wq, &bc->vf_async_work);
}
static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
@@ -1649,8 +1664,8 @@ static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
if (adapter->need_fw_reset)
return -EIO;
- qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
- adapter, cmd);
+ qlcnic_sriov_schedule_async_cmd(bc, cmd);
+
return 0;
}
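[Editor's note] The rework replaces one work item per command (racy: an entry could be reused while still pending) with a spinlock-protected command list drained by a single work item. A self-contained sketch of that producer/consumer shape:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct ex_async_cmd {
	struct list_head list;
	/* command payload omitted */
};

static LIST_HEAD(ex_cmd_list);
static DEFINE_SPINLOCK(ex_cmd_lock);

static void ex_drain_cmds(struct work_struct *work)
{
	struct ex_async_cmd *entry, *tmp;
	LIST_HEAD(del_list);

	/* take the whole queue in one shot, then work unlocked */
	spin_lock(&ex_cmd_lock);
	list_splice_init(&ex_cmd_list, &del_list);
	spin_unlock(&ex_cmd_lock);

	list_for_each_entry_safe(entry, tmp, &del_list, list) {
		list_del(&entry->list);
		/* issue entry's command here */
		kfree(entry);
	}
}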
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index deae10d..5297bf7 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -467,8 +467,8 @@ static int cp_rx_poll(struct napi_struct *napi, int budget)
unsigned int rx_tail = cp->rx_tail;
int rx;
-rx_status_loop:
rx = 0;
+rx_status_loop:
cpw16(IntrStatus, cp_rx_intr_mask);
while (rx < budget) {
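[Editor's note] Moving rx = 0 above the label means the work counter is initialised once, not on every restart of the status loop; restarting used to reset it, letting the poller exceed its NAPI budget. In sketch form, with stubbed-out hardware queries:

static bool ex_process_one(void) { return false; }	/* stub */
static bool ex_irq_pending(void) { return false; }	/* stub */

static int ex_rx_poll(int budget)
{
	int rx = 0;	/* counts all work done, across restarts */

rx_status_loop:
	while (rx < budget && ex_process_one())
		rx++;

	/* restart without resetting rx, so the budget still holds */
	if (rx < budget && ex_irq_pending())
		goto rx_status_loop;

	return rx;
}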
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 799d58d..054e795 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -201,9 +201,14 @@ static const u16 sh_eth_offset_fast_rz[SH_ETH_MAX_REGISTER_OFFSET] = {
[ARSTR] = 0x0000,
[TSU_CTRST] = 0x0004,
+ [TSU_FWSLC] = 0x0038,
[TSU_VTAG0] = 0x0058,
[TSU_ADSBSY] = 0x0060,
[TSU_TEN] = 0x0064,
+ [TSU_POST1] = 0x0070,
+ [TSU_POST2] = 0x0074,
+ [TSU_POST3] = 0x0078,
+ [TSU_POST4] = 0x007c,
[TSU_ADRH0] = 0x0100,
[TXNLCR0] = 0x0080,
@@ -2786,6 +2791,8 @@ static void sh_eth_tsu_init(struct sh_eth_private *mdp)
{
if (sh_eth_is_rz_fast_ether(mdp)) {
sh_eth_tsu_write(mdp, 0, TSU_TEN); /* Disable all CAM entry */
+ sh_eth_tsu_write(mdp, TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL,
+ TSU_FWSLC); /* Enable POST registers */
return;
}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index f658fee..e00a669 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -1517,13 +1517,14 @@ static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
}
#if BITS_PER_LONG == 64
+ BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
mask[0] = raw_mask[0];
mask[1] = raw_mask[1];
#else
+ BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
mask[0] = raw_mask[0] & 0xffffffff;
mask[1] = raw_mask[0] >> 32;
mask[2] = raw_mask[1] & 0xffffffff;
- mask[3] = raw_mask[1] >> 32;
#endif
}
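[Editor's note] On 32-bit builds the removed mask[3] assignment wrote past the end of the caller's mask array (BITS_TO_LONGS(EF10_STAT_COUNT) is 3 there, not 4); the added BUILD_BUG_ON()s turn any future size mismatch into a compile error. Sketch, with a hypothetical stat count:

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/types.h>

#define EX_STAT_COUNT 83	/* hypothetical: > 64 and <= 96 */

static void ex_fill_mask(unsigned long *mask, const u64 raw[2])
{
#if BITS_PER_LONG == 64
	BUILD_BUG_ON(BITS_TO_LONGS(EX_STAT_COUNT) != 2);
	mask[0] = raw[0];
	mask[1] = raw[1];
#else
	BUILD_BUG_ON(BITS_TO_LONGS(EX_STAT_COUNT) != 3);
	mask[0] = raw[0] & 0xffffffff;
	mask[1] = raw[0] >> 32;
	mask[2] = raw[1] & 0xffffffff;	/* no mask[3]: it would overflow */
#endif
}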
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 726b80f..503a3b6 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2275,6 +2275,13 @@ static int smc_drv_probe(struct platform_device *pdev)
if (pd) {
memcpy(&lp->cfg, pd, sizeof(lp->cfg));
lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
+
+ if (!SMC_8BIT(lp) && !SMC_16BIT(lp)) {
+ dev_err(&pdev->dev,
+ "at least one of 8-bit or 16-bit access support is required.\n");
+ ret = -ENXIO;
+ goto out_free_netdev;
+ }
}
#if IS_BUILTIN(CONFIG_OF)
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 1a55c79..ea84654 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -37,6 +37,27 @@
#include <linux/smc91x.h>
/*
+ * Any 16-bit access is performed with two 8-bit accesses if the hardware
+ * can't do it directly. Most registers are 16-bit so those are mandatory.
+ */
+#define SMC_outw_b(x, a, r) \
+ do { \
+ unsigned int __val16 = (x); \
+ unsigned int __reg = (r); \
+ SMC_outb(__val16, a, __reg); \
+ SMC_outb(__val16 >> 8, a, __reg + (1 << SMC_IO_SHIFT)); \
+ } while (0)
+
+#define SMC_inw_b(a, r) \
+ ({ \
+ unsigned int __val16; \
+ unsigned int __reg = r; \
+ __val16 = SMC_inb(a, __reg); \
+ __val16 |= SMC_inb(a, __reg + (1 << SMC_IO_SHIFT)) << 8; \
+ __val16; \
+ })
+
+/*
* Define your architecture specific bus configuration parameters here.
*/
@@ -55,10 +76,30 @@
#define SMC_IO_SHIFT (lp->io_shift)
#define SMC_inb(a, r) readb((a) + (r))
-#define SMC_inw(a, r) readw((a) + (r))
+#define SMC_inw(a, r) \
+ ({ \
+ unsigned int __smc_r = r; \
+ SMC_16BIT(lp) ? readw((a) + __smc_r) : \
+ SMC_8BIT(lp) ? SMC_inw_b(a, __smc_r) : \
+ ({ BUG(); 0; }); \
+ })
+
#define SMC_inl(a, r) readl((a) + (r))
#define SMC_outb(v, a, r) writeb(v, (a) + (r))
+#define SMC_outw(v, a, r) \
+ do { \
+ unsigned int __v = v, __smc_r = r; \
+ if (SMC_16BIT(lp)) \
+ __SMC_outw(__v, a, __smc_r); \
+ else if (SMC_8BIT(lp)) \
+ SMC_outw_b(__v, a, __smc_r); \
+ else \
+ BUG(); \
+ } while (0)
+
#define SMC_outl(v, a, r) writel(v, (a) + (r))
+#define SMC_insb(a, r, p, l) readsb((a) + (r), p, l)
+#define SMC_outsb(a, r, p, l) writesb((a) + (r), p, l)
#define SMC_insw(a, r, p, l) readsw((a) + (r), p, l)
#define SMC_outsw(a, r, p, l) writesw((a) + (r), p, l)
#define SMC_insl(a, r, p, l) readsl((a) + (r), p, l)
@@ -66,7 +107,7 @@
#define SMC_IRQ_FLAGS (-1) /* from resource */
/* We actually can't write halfwords properly if not word aligned */
-static inline void SMC_outw(u16 val, void __iomem *ioaddr, int reg)
+static inline void __SMC_outw(u16 val, void __iomem *ioaddr, int reg)
{
if ((machine_is_mainstone() || machine_is_stargate2() ||
machine_is_pxa_idp()) && reg & 2) {
@@ -416,24 +457,8 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
#if ! SMC_CAN_USE_16BIT
-/*
- * Any 16-bit access is performed with two 8-bit accesses if the hardware
- * can't do it directly. Most registers are 16-bit so those are mandatory.
- */
-#define SMC_outw(x, ioaddr, reg) \
- do { \
- unsigned int __val16 = (x); \
- SMC_outb( __val16, ioaddr, reg ); \
- SMC_outb( __val16 >> 8, ioaddr, reg + (1 << SMC_IO_SHIFT));\
- } while (0)
-#define SMC_inw(ioaddr, reg) \
- ({ \
- unsigned int __val16; \
- __val16 = SMC_inb( ioaddr, reg ); \
- __val16 |= SMC_inb( ioaddr, reg + (1 << SMC_IO_SHIFT)) << 8; \
- __val16; \
- })
-
+#define SMC_outw(x, ioaddr, reg) SMC_outw_b(x, ioaddr, reg)
+#define SMC_inw(ioaddr, reg) SMC_inw_b(ioaddr, reg)
#define SMC_insw(a, r, p, l) BUG()
#define SMC_outsw(a, r, p, l) BUG()
@@ -445,7 +470,9 @@ smc_pxa_dma_insw(void __iomem *ioaddr, struct smc_local *lp, int reg, int dma,
#endif
#if ! SMC_CAN_USE_8BIT
+#undef SMC_inb
#define SMC_inb(ioaddr, reg) ({ BUG(); 0; })
+#undef SMC_outb
#define SMC_outb(x, ioaddr, reg) BUG()
#define SMC_insb(a, r, p, l) BUG()
#define SMC_outsb(a, r, p, l) BUG()
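[Editor's note] The byte-composed helpers are hoisted out of the !SMC_CAN_USE_16BIT block so hardware whose bus width is only known at runtime can use them too. The composition itself is simple; a standalone sketch with the bank shift passed explicitly:

#include <linux/io.h>
#include <linux/types.h>

/* 16-bit access built from two 8-bit accesses, low byte first */
static inline void ex_outw_b(u16 val, void __iomem *base,
			     unsigned int reg, unsigned int shift)
{
	writeb(val & 0xff, base + reg);
	writeb(val >> 8, base + reg + (1 << shift));
}

static inline u16 ex_inw_b(void __iomem *base, unsigned int reg,
			   unsigned int shift)
{
	u16 val = readb(base + reg);

	return val | (readb(base + reg + (1 << shift)) << 8);
}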
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index ca31345..4f8910b 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1099,15 +1099,8 @@ static int smsc911x_mii_init(struct platform_device *pdev,
goto err_out_free_bus_2;
}
- if (smsc911x_mii_probe(dev) < 0) {
- SMSC_WARN(pdata, probe, "Error registering mii bus");
- goto err_out_unregister_bus_3;
- }
-
return 0;
-err_out_unregister_bus_3:
- mdiobus_unregister(pdata->mii_bus);
err_out_free_bus_2:
mdiobus_free(pdata->mii_bus);
err_out_1:
@@ -1514,23 +1507,90 @@ static void smsc911x_disable_irq_chip(struct net_device *dev)
smsc911x_reg_write(pdata, INT_STS, 0xFFFFFFFF);
}
+static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct smsc911x_data *pdata = netdev_priv(dev);
+ u32 intsts = smsc911x_reg_read(pdata, INT_STS);
+ u32 inten = smsc911x_reg_read(pdata, INT_EN);
+ int serviced = IRQ_NONE;
+ u32 temp;
+
+ if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp &= (~INT_EN_SW_INT_EN_);
+ smsc911x_reg_write(pdata, INT_EN, temp);
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
+ pdata->software_irq_signal = 1;
+ smp_wmb();
+ serviced = IRQ_HANDLED;
+ }
+
+ if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
+ /* Called when there is a multicast update scheduled and
+ * it is now safe to complete the update */
+ SMSC_TRACE(pdata, intr, "RX Stop interrupt");
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
+ if (pdata->multicast_update_pending)
+ smsc911x_rx_multicast_update_workaround(pdata);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (intsts & inten & INT_STS_TDFA_) {
+ temp = smsc911x_reg_read(pdata, FIFO_INT);
+ temp |= FIFO_INT_TX_AVAIL_LEVEL_;
+ smsc911x_reg_write(pdata, FIFO_INT, temp);
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
+ netif_wake_queue(dev);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (unlikely(intsts & inten & INT_STS_RXE_)) {
+ SMSC_TRACE(pdata, intr, "RX Error interrupt");
+ smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
+ serviced = IRQ_HANDLED;
+ }
+
+ if (likely(intsts & inten & INT_STS_RSFL_)) {
+ if (likely(napi_schedule_prep(&pdata->napi))) {
+ /* Disable Rx interrupts */
+ temp = smsc911x_reg_read(pdata, INT_EN);
+ temp &= (~INT_EN_RSFL_EN_);
+ smsc911x_reg_write(pdata, INT_EN, temp);
+ /* Schedule a NAPI poll */
+ __napi_schedule(&pdata->napi);
+ } else {
+ SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
+ }
+ serviced = IRQ_HANDLED;
+ }
+
+ return serviced;
+}
+
static int smsc911x_open(struct net_device *dev)
{
struct smsc911x_data *pdata = netdev_priv(dev);
unsigned int timeout;
unsigned int temp;
unsigned int intcfg;
+ int retval;
+ int irq_flags;
- /* if the phy is not yet registered, retry later*/
+ /* find and start the given phy */
if (!dev->phydev) {
- SMSC_WARN(pdata, hw, "phy_dev is NULL");
- return -EAGAIN;
+ retval = smsc911x_mii_probe(dev);
+ if (retval < 0) {
+ SMSC_WARN(pdata, probe, "Error starting phy");
+ goto out;
+ }
}
/* Reset the LAN911x */
- if (smsc911x_soft_reset(pdata)) {
+ retval = smsc911x_soft_reset(pdata);
+ if (retval) {
SMSC_WARN(pdata, hw, "soft reset failed");
- return -EIO;
+ goto mii_free_out;
}
smsc911x_reg_write(pdata, HW_CFG, 0x00050000);
@@ -1586,6 +1646,15 @@ static int smsc911x_open(struct net_device *dev)
pdata->software_irq_signal = 0;
smp_wmb();
+ irq_flags = irq_get_trigger_type(dev->irq);
+ retval = request_irq(dev->irq, smsc911x_irqhandler,
+ irq_flags | IRQF_SHARED, dev->name, dev);
+ if (retval) {
+ SMSC_WARN(pdata, probe,
+ "Unable to claim requested irq: %d", dev->irq);
+ goto mii_free_out;
+ }
+
temp = smsc911x_reg_read(pdata, INT_EN);
temp |= INT_EN_SW_INT_EN_;
smsc911x_reg_write(pdata, INT_EN, temp);
@@ -1600,7 +1669,8 @@ static int smsc911x_open(struct net_device *dev)
if (!pdata->software_irq_signal) {
netdev_warn(dev, "ISR failed signaling test (IRQ %d)\n",
dev->irq);
- return -ENODEV;
+ retval = -ENODEV;
+ goto irq_stop_out;
}
SMSC_TRACE(pdata, ifup, "IRQ handler passed test using IRQ %d",
dev->irq);
@@ -1646,6 +1716,14 @@ static int smsc911x_open(struct net_device *dev)
netif_start_queue(dev);
return 0;
+
+irq_stop_out:
+ free_irq(dev->irq, dev);
+mii_free_out:
+ phy_disconnect(dev->phydev);
+ dev->phydev = NULL;
+out:
+ return retval;
}
/* Entry point for stopping the interface */
@@ -1667,9 +1745,15 @@ static int smsc911x_stop(struct net_device *dev)
dev->stats.rx_dropped += smsc911x_reg_read(pdata, RX_DROP);
smsc911x_tx_update_txcounters(dev);
+ free_irq(dev->irq, dev);
+
/* Bring the PHY down */
- if (dev->phydev)
+ if (dev->phydev) {
phy_stop(dev->phydev);
+ phy_disconnect(dev->phydev);
+ dev->phydev = NULL;
+ }
+ netif_carrier_off(dev);
SMSC_TRACE(pdata, ifdown, "Interface stopped");
return 0;
@@ -1811,67 +1895,6 @@ static void smsc911x_set_multicast_list(struct net_device *dev)
spin_unlock_irqrestore(&pdata->mac_lock, flags);
}
-static irqreturn_t smsc911x_irqhandler(int irq, void *dev_id)
-{
- struct net_device *dev = dev_id;
- struct smsc911x_data *pdata = netdev_priv(dev);
- u32 intsts = smsc911x_reg_read(pdata, INT_STS);
- u32 inten = smsc911x_reg_read(pdata, INT_EN);
- int serviced = IRQ_NONE;
- u32 temp;
-
- if (unlikely(intsts & inten & INT_STS_SW_INT_)) {
- temp = smsc911x_reg_read(pdata, INT_EN);
- temp &= (~INT_EN_SW_INT_EN_);
- smsc911x_reg_write(pdata, INT_EN, temp);
- smsc911x_reg_write(pdata, INT_STS, INT_STS_SW_INT_);
- pdata->software_irq_signal = 1;
- smp_wmb();
- serviced = IRQ_HANDLED;
- }
-
- if (unlikely(intsts & inten & INT_STS_RXSTOP_INT_)) {
- /* Called when there is a multicast update scheduled and
- * it is now safe to complete the update */
- SMSC_TRACE(pdata, intr, "RX Stop interrupt");
- smsc911x_reg_write(pdata, INT_STS, INT_STS_RXSTOP_INT_);
- if (pdata->multicast_update_pending)
- smsc911x_rx_multicast_update_workaround(pdata);
- serviced = IRQ_HANDLED;
- }
-
- if (intsts & inten & INT_STS_TDFA_) {
- temp = smsc911x_reg_read(pdata, FIFO_INT);
- temp |= FIFO_INT_TX_AVAIL_LEVEL_;
- smsc911x_reg_write(pdata, FIFO_INT, temp);
- smsc911x_reg_write(pdata, INT_STS, INT_STS_TDFA_);
- netif_wake_queue(dev);
- serviced = IRQ_HANDLED;
- }
-
- if (unlikely(intsts & inten & INT_STS_RXE_)) {
- SMSC_TRACE(pdata, intr, "RX Error interrupt");
- smsc911x_reg_write(pdata, INT_STS, INT_STS_RXE_);
- serviced = IRQ_HANDLED;
- }
-
- if (likely(intsts & inten & INT_STS_RSFL_)) {
- if (likely(napi_schedule_prep(&pdata->napi))) {
- /* Disable Rx interrupts */
- temp = smsc911x_reg_read(pdata, INT_EN);
- temp &= (~INT_EN_RSFL_EN_);
- smsc911x_reg_write(pdata, INT_EN, temp);
- /* Schedule a NAPI poll */
- __napi_schedule(&pdata->napi);
- } else {
- SMSC_WARN(pdata, rx_err, "napi_schedule_prep failed");
- }
- serviced = IRQ_HANDLED;
- }
-
- return serviced;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
static void smsc911x_poll_controller(struct net_device *dev)
{
@@ -2291,16 +2314,14 @@ static int smsc911x_drv_remove(struct platform_device *pdev)
pdata = netdev_priv(dev);
BUG_ON(!pdata);
BUG_ON(!pdata->ioaddr);
- BUG_ON(!dev->phydev);
+ WARN_ON(dev->phydev);
SMSC_TRACE(pdata, ifdown, "Stopping driver");
- phy_disconnect(dev->phydev);
mdiobus_unregister(pdata->mii_bus);
mdiobus_free(pdata->mii_bus);
unregister_netdev(dev);
- free_irq(dev->irq, dev);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"smsc911x-memory");
if (!res)
@@ -2385,8 +2406,7 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
struct smsc911x_data *pdata;
struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
struct resource *res;
- unsigned int intcfg = 0;
- int res_size, irq, irq_flags;
+ int res_size, irq;
int retval;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2425,7 +2445,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
pdata = netdev_priv(dev);
dev->irq = irq;
- irq_flags = irq_get_trigger_type(irq);
pdata->ioaddr = ioremap_nocache(res->start, res_size);
pdata->dev = dev;
@@ -2472,43 +2491,23 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
if (retval < 0)
goto out_disable_resources;
- /* configure irq polarity and type before connecting isr */
- if (pdata->config.irq_polarity == SMSC911X_IRQ_POLARITY_ACTIVE_HIGH)
- intcfg |= INT_CFG_IRQ_POL_;
-
- if (pdata->config.irq_type == SMSC911X_IRQ_TYPE_PUSH_PULL)
- intcfg |= INT_CFG_IRQ_TYPE_;
-
- smsc911x_reg_write(pdata, INT_CFG, intcfg);
-
- /* Ensure interrupts are globally disabled before connecting ISR */
- smsc911x_disable_irq_chip(dev);
+ netif_carrier_off(dev);
- retval = request_irq(dev->irq, smsc911x_irqhandler,
- irq_flags | IRQF_SHARED, dev->name, dev);
+ retval = smsc911x_mii_init(pdev, dev);
if (retval) {
- SMSC_WARN(pdata, probe,
- "Unable to claim requested irq: %d", dev->irq);
+ SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
goto out_disable_resources;
}
- netif_carrier_off(dev);
-
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
- goto out_free_irq;
+ goto out_disable_resources;
} else {
SMSC_TRACE(pdata, probe,
"Network interface: \"%s\"", dev->name);
}
- retval = smsc911x_mii_init(pdev, dev);
- if (retval) {
- SMSC_WARN(pdata, probe, "Error %i initialising mii", retval);
- goto out_unregister_netdev_5;
- }
-
spin_lock_irq(&pdata->mac_lock);
/* Check if mac address has been specified when bringing interface up */
@@ -2544,10 +2543,6 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
return 0;
-out_unregister_netdev_5:
- unregister_netdev(dev);
-out_free_irq:
- free_irq(dev->irq, dev);
out_disable_resources:
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
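[Editor's note] The IRQ and PHY setup move from probe to open/stop, so an interface that is never brought up holds neither; remove() correspondingly drops its free_irq()/phy_disconnect() calls. A skeletal sketch of the new lifetime, with a placeholder handler:

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/netdevice.h>

static irqreturn_t ex_irqhandler(int irq, void *dev_id)
{
	return IRQ_HANDLED;	/* real handler dispatches INT_STS causes */
}

static int ex_open(struct net_device *dev)
{
	int ret;

	ret = request_irq(dev->irq, ex_irqhandler,
			  irq_get_trigger_type(dev->irq) | IRQF_SHARED,
			  dev->name, dev);
	if (ret)
		return ret;

	netif_start_queue(dev);
	return 0;
}

static int ex_stop(struct net_device *dev)
{
	netif_stop_queue(dev);
	free_irq(dev->irq, dev);	/* mirrors the request in open */
	return 0;
}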
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index cbefe9e..885a5e6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -261,7 +261,7 @@ static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
}
if (mode & WAKE_UCAST) {
pr_debug("GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
+ pmt |= power_down | global_unicast | wake_up_frame_en;
}
writel(pmt, ioaddr + GMAC_PMT);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index df5580d..51019b7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -102,7 +102,7 @@ static void dwmac4_pmt(struct mac_device_info *hw, unsigned long mode)
}
if (mode & WAKE_UCAST) {
pr_debug("GMAC: WOL on global unicast\n");
- pmt |= global_unicast;
+ pmt |= power_down | global_unicast | wake_up_frame_en;
}
writel(pmt, ioaddr + GMAC_PMT);
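[Editor's note] Both GMAC cores (dwmac1000 above, dwmac4 below) share the same bug: selecting the unicast wake source without also setting the power-down and wake-up-frame-enable bits leaves Wake-on-LAN inert. Sketch, with placeholder bit positions (the drivers' real power_down/global_unicast/wake_up_frame_en values differ):

#include <linux/bitops.h>

#define EX_PMT_POWER_DOWN	BIT(0)	/* placeholder bit positions */
#define EX_PMT_WAKE_FRAME_EN	BIT(2)
#define EX_PMT_GLOBAL_UNICAST	BIT(9)

static u32 ex_pmt_enable_ucast_wol(u32 pmt)
{
	/* a wake source only takes effect with these companion bits */
	return pmt | EX_PMT_POWER_DOWN | EX_PMT_GLOBAL_UNICAST |
	       EX_PMT_WAKE_FRAME_EN;
}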
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 9f159a7..4490eba 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -1246,7 +1246,7 @@ static int dwceqos_mii_init(struct net_local *lp)
lp->mii_bus->read = &dwceqos_mdio_read;
lp->mii_bus->write = &dwceqos_mdio_write;
lp->mii_bus->priv = lp;
- lp->mii_bus->parent = &lp->ndev->dev;
+ lp->mii_bus->parent = &lp->pdev->dev;
of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
@@ -1622,13 +1622,7 @@ static void dwceqos_init_hw(struct net_local *lp)
DWCEQOS_MMC_CTRL_RSTONRD);
dwceqos_enable_mmc_interrupt(lp);
- /* Enable Interrupts */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
- DWCEQOS_DMA_CH0_IE_NIE |
- DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
- DWCEQOS_DMA_CH0_IE_AIE |
- DWCEQOS_DMA_CH0_IE_FBEE);
-
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0);
dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
@@ -1905,6 +1899,15 @@ static int dwceqos_open(struct net_device *ndev)
netif_start_queue(ndev);
tasklet_enable(&lp->tx_bdreclaim_tasklet);
+ /* Enable Interrupts -- do this only after we enable NAPI and the
+ * tasklet.
+ */
+ dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
+ DWCEQOS_DMA_CH0_IE_NIE |
+ DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
+ DWCEQOS_DMA_CH0_IE_AIE |
+ DWCEQOS_DMA_CH0_IE_FBEE);
+
return 0;
}
@@ -2850,25 +2853,17 @@ static int dwceqos_probe(struct platform_device *pdev)
ndev->features = ndev->hw_features;
- netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
-
- ret = register_netdev(ndev);
- if (ret) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_clk_dis_aper;
- }
-
lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
if (IS_ERR(lp->phy_ref_clk)) {
dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
ret = PTR_ERR(lp->phy_ref_clk);
- goto err_out_unregister_netdev;
+ goto err_out_clk_dis_aper;
}
ret = clk_prepare_enable(lp->phy_ref_clk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable device clock.\n");
- goto err_out_unregister_netdev;
+ goto err_out_clk_dis_aper;
}
lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
@@ -2877,7 +2872,7 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
if (ret < 0) {
dev_err(&pdev->dev, "invalid fixed-link");
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
lp->phy_node = of_node_get(lp->pdev->dev.of_node);
@@ -2886,7 +2881,7 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = of_get_phy_mode(lp->pdev->dev.of_node);
if (ret < 0) {
dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
lp->phy_interface = ret;
@@ -2894,14 +2889,14 @@ static int dwceqos_probe(struct platform_device *pdev)
ret = dwceqos_mii_init(lp);
if (ret) {
dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
ret = dwceqos_mii_probe(ndev);
if (ret != 0) {
netdev_err(ndev, "mii_probe fail.\n");
ret = -ENXIO;
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
@@ -2919,7 +2914,7 @@ static int dwceqos_probe(struct platform_device *pdev)
if (ret) {
dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
ret);
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
pdev->id, ndev->base_addr, ndev->irq);
@@ -2929,18 +2924,24 @@ static int dwceqos_probe(struct platform_device *pdev)
if (ret) {
dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
ndev->irq, ret);
- goto err_out_unregister_clk_notifier;
+ goto err_out_clk_dis_phy;
}
if (netif_msg_probe(lp))
netdev_dbg(ndev, "net_local@%p\n", lp);
+ netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
+
+ ret = register_netdev(ndev);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+ goto err_out_clk_dis_phy;
+ }
+
return 0;
-err_out_unregister_clk_notifier:
+err_out_clk_dis_phy:
clk_disable_unprepare(lp->phy_ref_clk);
-err_out_unregister_netdev:
- unregister_netdev(ndev);
err_out_clk_dis_aper:
clk_disable_unprepare(lp->apb_pclk);
err_out_free_netdev:
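[Editor's note] The probe reshuffle enforces a general rule: register_netdev() publishes the device, after which ndo_open can run at any moment, so clocks, PHY, IRQ and NAPI must already be ready; the old order could open a half-initialised device. A skeletal sketch with hypothetical ex_* helpers:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/platform_device.h>

static int ex_setup_resources(struct platform_device *pdev,
			      struct net_device *ndev)
{
	return 0;	/* clocks, PHY, IRQ, NAPI would be set up here */
}

static void ex_teardown_resources(struct platform_device *pdev,
				  struct net_device *ndev)
{
}

static int ex_probe(struct platform_device *pdev)
{
	struct net_device *ndev;
	int ret;

	ndev = alloc_etherdev(0);
	if (!ndev)
		return -ENOMEM;

	ret = ex_setup_resources(pdev, ndev);
	if (ret)
		goto err_free;

	ret = register_netdev(ndev);	/* publish only when fully ready */
	if (ret)
		goto err_teardown;

	return 0;

err_teardown:
	ex_teardown_resources(pdev, ndev);
err_free:
	free_netdev(ndev);
	return ret;
}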
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 7452b5f..7108c68 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -1987,7 +1987,7 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if ((readl(nic->regs + FPGA_VER) & 0xFFF) >= 378) {
err = pci_enable_msi(pdev);
if (err)
- pr_err("Can't eneble msi. error is %d\n", err);
+ pr_err("Can't enable msi. error is %d\n", err);
else
nic->irq_type = IRQ_MSI;
} else
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c51f346..f85d605 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -734,6 +734,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
netif_receive_skb(skb);
ndev->stats.rx_bytes += len;
ndev->stats.rx_packets++;
+ kmemleak_not_leak(new_skb);
} else {
ndev->stats.rx_dropped++;
new_skb = skb;
@@ -1325,6 +1326,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
kfree_skb(skb);
goto err_cleanup;
}
+ kmemleak_not_leak(skb);
}
/* continue even if we didn't manage to submit all
* receive descs
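[Editor's note] The cpsw RX buffers are reachable only through DMA descriptors that kmemleak cannot scan, so they were reported as false leaks; kmemleak_not_leak() suppresses that right after each buffer is queued. Sketch:

#include <linux/kmemleak.h>
#include <linux/skbuff.h>

static void ex_submit_rx_buf(struct sk_buff *skb)
{
	/* hand skb->data to the DMA engine here, then tell kmemleak
	 * the object is intentionally unreachable from scanned memory */
	kmemleak_not_leak(skb);
}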
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index 01a7714..8fd1312 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -166,6 +166,7 @@ static struct platform_driver tsi_eth_driver = {
static void tsi108_timed_checker(unsigned long dev_ptr);
+#ifdef DEBUG
static void dump_eth_one(struct net_device *dev)
{
struct tsi108_prv_data *data = netdev_priv(dev);
@@ -190,6 +191,7 @@ static void dump_eth_one(struct net_device *dev)
TSI_READ(TSI108_EC_RXESTAT),
TSI_READ(TSI108_EC_RXERR), data->rxpending);
}
+#endif
/* Synchronization is needed between the thread and up/down events.
* Note that the PHY is accessed through the same registers for both
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 3cee84a..93dc10b 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1131,11 +1131,13 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
lp->rx_ping_pong = get_bool(ofdev, "xlnx,rx-ping-pong");
mac_address = of_get_mac_address(ofdev->dev.of_node);
- if (mac_address)
+ if (mac_address) {
/* Set the MAC address. */
memcpy(ndev->dev_addr, mac_address, ETH_ALEN);
- else
- dev_warn(dev, "No MAC address found\n");
+ } else {
+ dev_warn(dev, "No MAC address found, using random\n");
+ eth_hw_addr_random(ndev);
+ }
/* Clear the Tx CSR's in case this is a restart */
__raw_writel(0, lp->base_addr + XEL_TSR_OFFSET);