Diffstat (limited to 'drivers/net')
-rw-r--r--  drivers/net/ethernet/Kconfig | 1
-rw-r--r--  drivers/net/ethernet/Makefile | 1
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c | 107
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h | 2
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 18
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 57
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 31
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 9
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 20
-rw-r--r--  drivers/net/ethernet/brocade/bna/bfa_ioc.c | 4
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c | 4
-rw-r--r--  drivers/net/ethernet/brocade/bna/cna_fwimg.c | 7
-rw-r--r--  drivers/net/ethernet/cavium/Kconfig | 40
-rw-r--r--  drivers/net/ethernet/cavium/Makefile | 5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/Makefile | 11
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic.h | 422
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_main.c | 932
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nic_reg.h | 213
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c | 600
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c | 1331
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 1545
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h | 381
-rw-r--r--  drivers/net/ethernet/cavium/thunder/q_struct.h | 701
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 966
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.h | 220
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 101
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c | 238
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c | 292
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 174
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sge.c | 82
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 870
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 3
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 121
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_values.h | 1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h | 56
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h | 5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 49
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 18
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 36
-rw-r--r--  drivers/net/ethernet/dec/tulip/de4x5.c | 10
-rw-r--r--  drivers/net/ethernet/dec/tulip/uli526x.c | 2
-rw-r--r--  drivers/net/ethernet/dlink/dl2k.c | 4
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c | 6
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.c | 16
-rw-r--r--  drivers/net/ethernet/ibm/emac/core.h | 7
-rw-r--r--  drivers/net/ethernet/intel/e100.c | 4
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/80003es2lan.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/82571.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/defines.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/e1000.h | 8
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ethtool.c | 29
-rw-r--r--  drivers/net/ethernet/intel/e1000e/hw.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.c | 15
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ich8lan.h | 11
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/mac.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/manage.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/manage.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c | 173
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/nvm.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/param.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/phy.h | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/regs.h | 4
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 30
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_fcoe.c | 11
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 39
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.c | 144
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 7
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 158
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 2
-rw-r--r--  drivers/net/ethernet/intel/i40evf/i40e_type.h | 1
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 34
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c | 9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cmd.c | 11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_cq.c | 56
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 17
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c | 13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c | 376
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 112
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 12
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Kconfig | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/Makefile | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/alloc.c | 96
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 36
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cq.c | 18
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 520
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 679
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c | 858
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 1899
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 249
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 344
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c | 107
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c | 23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/flow_table.c | 422
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c | 90
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 337
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mcg.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 19
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 162
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.c | 169
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/transobj.h | 47
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/uar.c | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 84
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.h | 41
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.c | 183
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/wq.h | 171
-rw-r--r--  drivers/net/ethernet/micrel/ksz884x.c | 2
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 3
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 77
-rw-r--r--  drivers/net/ethernet/rocker/rocker.c | 611
-rw-r--r--  drivers/net/ethernet/sfc/Kconfig | 9
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c | 539
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.c | 57
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.h | 5
-rw-r--r--  drivers/net/ethernet/sfc/efx.c | 57
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c | 100
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.h | 6
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_pcol.h | 30
-rw-r--r--  drivers/net/ethernet/sfc/mcdi_port.c | 12
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h | 2
-rw-r--r--  drivers/net/ethernet/sfc/nic.h | 117
-rw-r--r--  drivers/net/ethernet/sfc/rx.c | 42
-rw-r--r--  drivers/net/ethernet/sfc/sriov.c | 11
-rw-r--r--  drivers/net/ethernet/sfc/sriov.h | 2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig | 14
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Makefile | 1
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c | 365
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac.h | 6
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 106
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 18
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c | 9
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.c | 45
-rw-r--r--  drivers/net/ethernet/ti/cpsw_ale.h | 2
-rw-r--r--  drivers/net/geneve.c | 32
-rw-r--r--  drivers/net/hyperv/hyperv_net.h | 1
-rw-r--r--  drivers/net/hyperv/netvsc.c | 11
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c | 4
-rw-r--r--  drivers/net/hyperv/rndis_filter.c | 16
-rw-r--r--  drivers/net/ieee802154/Kconfig | 10
-rw-r--r--  drivers/net/ieee802154/Makefile | 1
-rw-r--r--  drivers/net/ieee802154/at86rf230.c | 376
-rw-r--r--  drivers/net/ieee802154/at86rf230.h | 220
-rw-r--r--  drivers/net/ieee802154/atusb.c | 699
-rw-r--r--  drivers/net/ieee802154/atusb.h | 84
-rw-r--r--  drivers/net/ieee802154/cc2520.c | 2
-rw-r--r--  drivers/net/ieee802154/fakelb.c | 209
-rw-r--r--  drivers/net/ieee802154/mrf24j40.c | 2
-rw-r--r--  drivers/net/irda/irda-usb.c | 4
-rw-r--r--  drivers/net/phy/Kconfig | 6
-rw-r--r--  drivers/net/phy/Makefile | 1
-rw-r--r--  drivers/net/phy/bcm7xxx.c | 2
-rw-r--r--  drivers/net/phy/dp83640.c | 23
-rw-r--r--  drivers/net/phy/dp83867.c | 239
-rw-r--r--  drivers/net/phy/icplus.c | 5
-rw-r--r--  drivers/net/phy/marvell.c | 10
-rw-r--r--  drivers/net/phy/phy.c | 3
-rw-r--r--  drivers/net/vxlan.c | 5
-rw-r--r--  drivers/net/wireless/ath/ath6kl/cfg80211.c | 4
-rw-r--r--  drivers/net/wireless/ath/wil6210/main.c | 2
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c | 4
-rw-r--r--  drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c | 12
-rw-r--r--  drivers/net/wireless/iwlwifi/Kconfig | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-nvm-parse.c | 2
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/internal.h | 6
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/trans.c | 4
-rw-r--r--  drivers/net/wireless/iwlwifi/pcie/tx.c | 23
-rw-r--r--  drivers/net/wireless/libertas/cfg.c | 13
-rw-r--r--  drivers/net/wireless/libertas/cfg.h | 3
-rw-r--r--  drivers/net/wireless/libertas/cmd.h | 3
-rw-r--r--  drivers/net/wireless/libertas/cmdresp.c | 13
-rw-r--r--  drivers/net/wireless/mwifiex/join.c | 2
-rw-r--r--  drivers/net/wireless/mwifiex/sta_event.c | 2
-rw-r--r--  drivers/net/wireless/rndis_wlan.c | 2
-rw-r--r--  drivers/net/xen-netback/netback.c | 4
-rw-r--r--  drivers/net/xen-netback/xenbus.c | 34
-rw-r--r--  drivers/net/xen-netfront.c | 20
193 files changed, 19577 insertions(+), 2505 deletions(-)
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index eadcb05..9a83085 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -34,6 +34,7 @@ source "drivers/net/ethernet/adi/Kconfig"
source "drivers/net/ethernet/broadcom/Kconfig"
source "drivers/net/ethernet/brocade/Kconfig"
source "drivers/net/ethernet/calxeda/Kconfig"
+source "drivers/net/ethernet/cavium/Kconfig"
source "drivers/net/ethernet/chelsio/Kconfig"
source "drivers/net/ethernet/cirrus/Kconfig"
source "drivers/net/ethernet/cisco/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 1367afc..4395d99 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -20,6 +20,7 @@ obj-$(CONFIG_NET_BFIN) += adi/
obj-$(CONFIG_NET_VENDOR_BROADCOM) += broadcom/
obj-$(CONFIG_NET_VENDOR_BROCADE) += brocade/
obj-$(CONFIG_NET_CALXEDA_XGMAC) += calxeda/
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += cavium/
obj-$(CONFIG_NET_VENDOR_CHELSIO) += chelsio/
obj-$(CONFIG_NET_VENDOR_CIRRUS) += cirrus/
obj-$(CONFIG_NET_VENDOR_CISCO) += cisco/
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 084a50a..909ad7a 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -524,67 +524,70 @@ static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
dma_unmap_addr_set(cb, dma_addr, 0);
}
-static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
- struct bcm_sysport_cb *cb)
+static struct sk_buff *bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb)
{
struct device *kdev = &priv->pdev->dev;
struct net_device *ndev = priv->netdev;
+ struct sk_buff *skb, *rx_skb;
dma_addr_t mapping;
- int ret;
- cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
- if (!cb->skb) {
+ /* Allocate a new SKB for a new packet */
+ skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+ if (!skb) {
+ priv->mib.alloc_rx_buff_failed++;
netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
- return -ENOMEM;
+ return NULL;
}
- mapping = dma_map_single(kdev, cb->skb->data,
+ mapping = dma_map_single(kdev, skb->data,
RX_BUF_LENGTH, DMA_FROM_DEVICE);
- ret = dma_mapping_error(kdev, mapping);
- if (ret) {
+ if (dma_mapping_error(kdev, mapping)) {
priv->mib.rx_dma_failed++;
- bcm_sysport_free_cb(cb);
+ dev_kfree_skb_any(skb);
netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
- return ret;
+ return NULL;
}
- dma_unmap_addr_set(cb, dma_addr, mapping);
- dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+ /* Grab the current SKB on the ring */
+ rx_skb = cb->skb;
+ if (likely(rx_skb))
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
- priv->rx_bd_assign_index++;
- priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
- priv->rx_bd_assign_ptr = priv->rx_bds +
- (priv->rx_bd_assign_index * DESC_SIZE);
+ /* Put the new SKB on the ring */
+ cb->skb = skb;
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_desc_set_addr(priv, cb->bd_addr, mapping);
netif_dbg(priv, rx_status, ndev, "RX refill\n");
- return 0;
+ /* Return the current SKB to the caller */
+ return rx_skb;
}
static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
{
struct bcm_sysport_cb *cb;
- int ret = 0;
+ struct sk_buff *skb;
unsigned int i;
for (i = 0; i < priv->num_rx_bds; i++) {
- cb = &priv->rx_cbs[priv->rx_bd_assign_index];
- if (cb->skb)
- continue;
-
- ret = bcm_sysport_rx_refill(priv, cb);
- if (ret)
- break;
+ cb = &priv->rx_cbs[i];
+ skb = bcm_sysport_rx_refill(priv, cb);
+ if (skb)
+ dev_kfree_skb(skb);
+ if (!cb->skb)
+ return -ENOMEM;
}
- return ret;
+ return 0;
}
/* Poll the hardware for up to budget packets to process */
static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int budget)
{
- struct device *kdev = &priv->pdev->dev;
struct net_device *ndev = priv->netdev;
unsigned int processed = 0, to_process;
struct bcm_sysport_cb *cb;
@@ -592,7 +595,6 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
unsigned int p_index;
u16 len, status;
struct bcm_rsb *rsb;
- int ret;
/* Determine how much we should process since last call */
p_index = rdma_readl(priv, RDMA_PROD_INDEX);
@@ -610,13 +612,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
while ((processed < to_process) && (processed < budget)) {
cb = &priv->rx_cbs[priv->rx_read_ptr];
- skb = cb->skb;
+ skb = bcm_sysport_rx_refill(priv, cb);
- processed++;
- priv->rx_read_ptr++;
-
- if (priv->rx_read_ptr == priv->num_rx_bds)
- priv->rx_read_ptr = 0;
/* We do not have a backing SKB, so we do not have a corresponding
* DMA mapping for this incoming packet since
@@ -627,12 +624,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
netif_err(priv, rx_err, ndev, "out of memory!\n");
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
- goto refill;
+ goto next;
}
- dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
- RX_BUF_LENGTH, DMA_FROM_DEVICE);
-
/* Extract the Receive Status Block prepended */
rsb = (struct bcm_rsb *)skb->data;
len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
@@ -644,12 +638,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
p_index, priv->rx_c_index, priv->rx_read_ptr,
len, status);
+ if (unlikely(len > RX_BUF_LENGTH)) {
+ netif_err(priv, rx_status, ndev, "oversized packet\n");
+ ndev->stats.rx_length_errors++;
+ ndev->stats.rx_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
netif_err(priv, rx_status, ndev, "fragmented packet!\n");
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
- bcm_sysport_free_cb(cb);
- goto refill;
+ dev_kfree_skb_any(skb);
+ goto next;
}
if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
@@ -658,8 +660,8 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
ndev->stats.rx_over_errors++;
ndev->stats.rx_dropped++;
ndev->stats.rx_errors++;
- bcm_sysport_free_cb(cb);
- goto refill;
+ dev_kfree_skb_any(skb);
+ goto next;
}
skb_put(skb, len);
@@ -686,10 +688,12 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
ndev->stats.rx_bytes += len;
napi_gro_receive(&priv->napi, skb);
-refill:
- ret = bcm_sysport_rx_refill(priv, cb);
- if (ret)
- priv->mib.alloc_rx_buff_failed++;
+next:
+ processed++;
+ priv->rx_read_ptr++;
+
+ if (priv->rx_read_ptr == priv->num_rx_bds)
+ priv->rx_read_ptr = 0;
}
return processed;
@@ -1330,14 +1334,14 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
{
+ struct bcm_sysport_cb *cb;
u32 reg;
int ret;
+ int i;
/* Initialize SW view of the RX ring */
priv->num_rx_bds = NUM_RX_DESC;
priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
- priv->rx_bd_assign_ptr = priv->rx_bds;
- priv->rx_bd_assign_index = 0;
priv->rx_c_index = 0;
priv->rx_read_ptr = 0;
priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
@@ -1347,6 +1351,11 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
return -ENOMEM;
}
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = priv->rx_cbs + i;
+ cb->bd_addr = priv->rx_bds + i * DESC_SIZE;
+ }
+
ret = bcm_sysport_alloc_rx_bufs(priv);
if (ret) {
netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 42a4b4a..f28bf54 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -663,8 +663,6 @@ struct bcm_sysport_priv {
/* Receive queue */
void __iomem *rx_bds;
- void __iomem *rx_bd_assign_ptr;
- unsigned int rx_bd_assign_index;
struct bcm_sysport_cb *rx_cbs;
unsigned int num_rx_bds;
unsigned int rx_read_ptr;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a3b0f7a..7a4aaa3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -357,6 +357,7 @@ struct sw_tx_bd {
struct sw_rx_page {
struct page *page;
DEFINE_DMA_UNMAP_ADDR(mapping);
+ unsigned int offset;
};
union db_prod {
@@ -381,9 +382,10 @@ union db_prod {
#define PAGES_PER_SGE_SHIFT 0
#define PAGES_PER_SGE (1 << PAGES_PER_SGE_SHIFT)
-#define SGE_PAGE_SIZE PAGE_SIZE
-#define SGE_PAGE_SHIFT PAGE_SHIFT
-#define SGE_PAGE_ALIGN(addr) PAGE_ALIGN((typeof(PAGE_SIZE))(addr))
+#define SGE_PAGE_SHIFT 12
+#define SGE_PAGE_SIZE (1 << SGE_PAGE_SHIFT)
+#define SGE_PAGE_MASK (~(SGE_PAGE_SIZE - 1))
+#define SGE_PAGE_ALIGN(addr) (((addr) + SGE_PAGE_SIZE - 1) & SGE_PAGE_MASK)
#define SGE_PAGES (SGE_PAGE_SIZE * PAGES_PER_SGE)
#define TPA_AGG_SIZE min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) * \
SGE_PAGES), 0xffff)
@@ -526,6 +528,12 @@ enum bnx2x_tpa_mode_t {
TPA_MODE_GRO
};
+struct bnx2x_alloc_pool {
+ struct page *page;
+ dma_addr_t dma;
+ unsigned int offset;
+};
+
struct bnx2x_fastpath {
struct bnx2x *bp; /* parent */
@@ -599,6 +607,8 @@ struct bnx2x_fastpath {
4 (for the digits and to make it DWORD aligned) */
#define FP_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[FP_NAME_SIZE];
+
+ struct bnx2x_alloc_pool page_pool;
};
#define bnx2x_fp(bp, nr, var) ((bp)->fp[(nr)].var)
@@ -1774,7 +1784,7 @@ struct bnx2x {
int stats_state;
/* used for synchronization of concurrent threads statistics handling */
- struct mutex stats_lock;
+ struct semaphore stats_lock;
/* used by dmae command loader */
struct dmae_command stats_dmae;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 2ef202d..e2a6533 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -544,30 +544,49 @@ static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
u16 index, gfp_t gfp_mask)
{
- struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
+ struct bnx2x_alloc_pool *pool = &fp->page_pool;
dma_addr_t mapping;
- if (unlikely(page == NULL)) {
- BNX2X_ERR("Can't alloc sge\n");
- return -ENOMEM;
- }
+ if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
- mapping = dma_map_page(&bp->pdev->dev, page, 0,
- SGE_PAGES, DMA_FROM_DEVICE);
- if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
- __free_pages(page, PAGES_PER_SGE_SHIFT);
- BNX2X_ERR("Can't map sge\n");
- return -ENOMEM;
+ /* put page reference used by the memory pool, since we
+ * won't be using this page as the mempool anymore.
+ */
+ if (pool->page)
+ put_page(pool->page);
+
+ pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
+ if (unlikely(!pool->page)) {
+ BNX2X_ERR("Can't alloc sge\n");
+ return -ENOMEM;
+ }
+
+ pool->dma = dma_map_page(&bp->pdev->dev, pool->page, 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (unlikely(dma_mapping_error(&bp->pdev->dev,
+ pool->dma))) {
+ __free_pages(pool->page, PAGES_PER_SGE_SHIFT);
+ pool->page = NULL;
+ BNX2X_ERR("Can't map sge\n");
+ return -ENOMEM;
+ }
+ pool->offset = 0;
}
- sw_buf->page = page;
+ get_page(pool->page);
+ sw_buf->page = pool->page;
+ sw_buf->offset = pool->offset;
+
+ mapping = pool->dma + sw_buf->offset;
dma_unmap_addr_set(sw_buf, mapping, mapping);
sge->addr_hi = cpu_to_le32(U64_HI(mapping));
sge->addr_lo = cpu_to_le32(U64_LO(mapping));
+ pool->offset += SGE_PAGE_SIZE;
+
return 0;
}
@@ -629,20 +648,22 @@ static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return err;
}
- /* Unmap the page as we're going to pass it to the stack */
- dma_unmap_page(&bp->pdev->dev,
- dma_unmap_addr(&old_rx_pg, mapping),
- SGE_PAGES, DMA_FROM_DEVICE);
+ dma_unmap_single(&bp->pdev->dev,
+ dma_unmap_addr(&old_rx_pg, mapping),
+ SGE_PAGE_SIZE, DMA_FROM_DEVICE);
/* Add one frag and update the appropriate fields in the skb */
if (fp->mode == TPA_MODE_LRO)
- skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
+ skb_fill_page_desc(skb, j, old_rx_pg.page,
+ old_rx_pg.offset, frag_len);
else { /* GRO */
int rem;
int offset = 0;
for (rem = frag_len; rem > 0; rem -= gro_size) {
int len = rem > gro_size ? gro_size : rem;
skb_fill_page_desc(skb, frag_id++,
- old_rx_pg.page, offset, len);
+ old_rx_pg.page,
+ old_rx_pg.offset + offset,
+ len);
if (offset)
get_page(old_rx_pg.page);
offset += len;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index d7a7175..2b30081 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -804,9 +804,13 @@ static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
if (!page)
return;
- dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
- SGE_PAGES, DMA_FROM_DEVICE);
- __free_pages(page, PAGES_PER_SGE_SHIFT);
+ /* Since many fragments can share the same page, make sure to
+ * only unmap and free the page once.
+ */
+ dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
+ SGE_PAGE_SIZE, DMA_FROM_DEVICE);
+
+ put_page(page);
sw_buf->page = NULL;
sge->addr_hi = 0;
@@ -964,6 +968,25 @@ static inline void bnx2x_set_fw_mac_addr(__le16 *fw_hi, __le16 *fw_mid,
((u8 *)fw_lo)[1] = mac[4];
}
+static inline void bnx2x_free_rx_mem_pool(struct bnx2x *bp,
+ struct bnx2x_alloc_pool *pool)
+{
+ if (!pool->page)
+ return;
+
+ /* Page was not fully fragmented. Unmap unused space */
+ if (pool->offset < PAGE_SIZE) {
+ dma_addr_t dma = pool->dma + pool->offset;
+ int size = PAGE_SIZE - pool->offset;
+
+ dma_unmap_single(&bp->pdev->dev, dma, size, DMA_FROM_DEVICE);
+ }
+
+ put_page(pool->page);
+
+ pool->page = NULL;
+}
+
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
struct bnx2x_fastpath *fp, int last)
{
@@ -974,6 +997,8 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
for (i = 0; i < last; i++)
bnx2x_free_rx_sge(bp, fp, i);
+
+ bnx2x_free_rx_mem_pool(bp, &fp->page_pool);
}
static inline void bnx2x_set_next_page_rx_bd(struct bnx2x_fastpath *fp)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index fd52ce9..33501bc 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12054,7 +12054,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
mutex_init(&bp->port.phy_mutex);
mutex_init(&bp->fw_mb_mutex);
mutex_init(&bp->drv_info_mutex);
- mutex_init(&bp->stats_lock);
+ sema_init(&bp->stats_lock, 1);
bp->drv_info_mng_owner = false;
INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
@@ -13690,9 +13690,10 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
cancel_delayed_work_sync(&bp->sp_task);
cancel_delayed_work_sync(&bp->period_task);
- mutex_lock(&bp->stats_lock);
- bp->stats_state = STATS_STATE_DISABLED;
- mutex_unlock(&bp->stats_lock);
+ if (!down_timeout(&bp->stats_lock, HZ / 10)) {
+ bp->stats_state = STATS_STATE_DISABLED;
+ up(&bp->stats_lock);
+ }
bnx2x_save_statistics(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 266b055..69d699f0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -1372,19 +1372,23 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
* that context in case someone is in the middle of a transition.
* For other events, wait a bit until lock is taken.
*/
- if (!mutex_trylock(&bp->stats_lock)) {
+ if (down_trylock(&bp->stats_lock)) {
if (event == STATS_EVENT_UPDATE)
return;
DP(BNX2X_MSG_STATS,
"Unlikely stats' lock contention [event %d]\n", event);
- mutex_lock(&bp->stats_lock);
+ if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
+ BNX2X_ERR("Failed to take stats lock [event %d]\n",
+ event);
+ return;
+ }
}
bnx2x_stats_stm[state][event].action(bp);
bp->stats_state = bnx2x_stats_stm[state][event].next_state;
- mutex_unlock(&bp->stats_lock);
+ up(&bp->stats_lock);
if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1970,7 +1974,11 @@ int bnx2x_stats_safe_exec(struct bnx2x *bp,
/* Wait for statistics to end [while blocking further requests],
* then run supplied function 'safely'.
*/
- mutex_lock(&bp->stats_lock);
+ rc = down_timeout(&bp->stats_lock, HZ / 10);
+ if (unlikely(rc)) {
+ BNX2X_ERR("Failed to take statistics lock for safe execution\n");
+ goto out_no_lock;
+ }
bnx2x_stats_comp(bp);
while (bp->stats_pending && cnt--)
@@ -1988,7 +1996,7 @@ out:
/* No need to restart statistics - if they're enabled, the timer
* will restart the statistics.
*/
- mutex_unlock(&bp->stats_lock);
-
+ up(&bp->stats_lock);
+out_no_lock:
return rc;
}
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 594a2ab..68f3c13 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -2414,7 +2414,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
if (status == BFA_STATUS_OK)
bfa_ioc_lpu_start(ioc);
else
- bfa_nw_iocpf_timeout(ioc);
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
return status;
}
@@ -3029,7 +3029,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
}
if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
- bfa_nw_iocpf_timeout(ioc);
+ bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
} else {
ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
mod_timer(&ioc->iocpf_timer, jiffies +
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 37072a8..caae6cb 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -3701,10 +3701,6 @@ bnad_pci_probe(struct pci_dev *pdev,
setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
((unsigned long)bnad));
- /* Now start the timer before calling IOC */
- mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
- jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
-
/*
* Start the chip
* If the call back comes with error, we bail out.
diff --git a/drivers/net/ethernet/brocade/bna/cna_fwimg.c b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
index ebf462d..badea36 100644
--- a/drivers/net/ethernet/brocade/bna/cna_fwimg.c
+++ b/drivers/net/ethernet/brocade/bna/cna_fwimg.c
@@ -30,6 +30,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
u32 *bfi_image_size, char *fw_name)
{
const struct firmware *fw;
+ u32 n;
if (request_firmware(&fw, fw_name, &pdev->dev)) {
pr_alert("Can't locate firmware %s\n", fw_name);
@@ -40,6 +41,12 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
*bfi_image_size = fw->size/sizeof(u32);
bfi_fw = fw;
+ /* Convert loaded firmware to host order as it is stored in file
+ * as sequence of LE32 integers.
+ */
+ for (n = 0; n < *bfi_image_size; n++)
+ le32_to_cpus(*bfi_image + n);
+
return *bfi_image;
error:
return NULL;
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
new file mode 100644
index 0000000..fc3d8e3
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -0,0 +1,40 @@
+#
+# Cavium ethernet device configuration
+#
+
+config NET_VENDOR_CAVIUM
+ tristate "Cavium ethernet drivers"
+ depends on PCI && 64BIT
+ ---help---
+ Enable support for the Cavium ThunderX Network Interface
+ Controller (NIC). The NIC provides the controller and DMA
+ engines to move network traffic to/from the memory. The NIC
+ works closely with TNS, BGX and SerDes to implement the
+ functions replacing and virtualizing those of a typical
+ standalone PCIe NIC chip.
+
+ If you have a Cavium Thunder board, say Y.
+
+if NET_VENDOR_CAVIUM
+
+config THUNDER_NIC_PF
+ tristate "Thunder Physical function driver"
+ default NET_VENDOR_CAVIUM
+ select THUNDER_NIC_BGX
+ ---help---
+ This driver supports Thunder's NIC physical function.
+
+config THUNDER_NIC_VF
+ tristate "Thunder Virtual function driver"
+ default NET_VENDOR_CAVIUM
+ ---help---
+ This driver supports Thunder's NIC virtual function
+
+config THUNDER_NIC_BGX
+ tristate "Thunder MAC interface driver (BGX)"
+ default NET_VENDOR_CAVIUM
+ ---help---
+ This driver supports programming and controlling of MAC
+ interface from NIC physical function driver.
+
+endif # NET_VENDOR_CAVIUM
diff --git a/drivers/net/ethernet/cavium/Makefile b/drivers/net/ethernet/cavium/Makefile
new file mode 100644
index 0000000..7aac478
--- /dev/null
+++ b/drivers/net/ethernet/cavium/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the Cavium ethernet device drivers.
+#
+
+obj-$(CONFIG_NET_VENDOR_CAVIUM) += thunder/
diff --git a/drivers/net/ethernet/cavium/thunder/Makefile b/drivers/net/ethernet/cavium/thunder/Makefile
new file mode 100644
index 0000000..5c4615c
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for Cavium's Thunder ethernet device
+#
+
+obj-$(CONFIG_THUNDER_NIC_BGX) += thunder_bgx.o
+obj-$(CONFIG_THUNDER_NIC_PF) += nicpf.o
+obj-$(CONFIG_THUNDER_NIC_VF) += nicvf.o
+
+nicpf-y := nic_main.o
+nicvf-y := nicvf_main.o nicvf_queues.o
+nicvf-y += nicvf_ethtool.o
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
new file mode 100644
index 0000000..a3b43e5
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -0,0 +1,422 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NIC_H
+#define NIC_H
+
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include "thunder_bgx.h"
+
+/* PCI device IDs */
+#define PCI_DEVICE_ID_THUNDER_NIC_PF 0xA01E
+#define PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF 0x0011
+#define PCI_DEVICE_ID_THUNDER_NIC_VF 0xA034
+#define PCI_DEVICE_ID_THUNDER_BGX 0xA026
+
+/* PCI BAR nos */
+#define PCI_CFG_REG_BAR_NUM 0
+#define PCI_MSIX_REG_BAR_NUM 4
+
+/* NIC SRIOV VF count */
+#define MAX_NUM_VFS_SUPPORTED 128
+#define DEFAULT_NUM_VF_ENABLED 8
+
+#define NIC_TNS_BYPASS_MODE 0
+#define NIC_TNS_MODE 1
+
+/* NIC priv flags */
+#define NIC_SRIOV_ENABLED BIT(0)
+
+/* Min/Max packet size */
+#define NIC_HW_MIN_FRS 64
+#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */
+
+/* Max pkinds */
+#define NIC_MAX_PKIND 16
+
+/* Rx Channels */
+/* Receive channel configuration in TNS bypass mode
+ * Below is configuration in TNS bypass mode
+ * BGX0-LMAC0-CHAN0 - VNIC CHAN0
+ * BGX0-LMAC1-CHAN0 - VNIC CHAN16
+ * ...
+ * BGX1-LMAC0-CHAN0 - VNIC CHAN128
+ * ...
+ * BGX1-LMAC3-CHAN0 - VNIC CHAN174
+ */
+#define NIC_INTF_COUNT 2 /* Interfaces btw VNIC and TNS/BGX */
+#define NIC_CHANS_PER_INF 128
+#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF)
+#define NIC_CPI_COUNT 2048 /* No of channel parse indices */
+
+/* TNS bypass mode: 1-1 mapping between VNIC and BGX:LMAC */
+#define NIC_MAX_BGX MAX_BGX_PER_CN88XX
+#define NIC_CPI_PER_BGX (NIC_CPI_COUNT / NIC_MAX_BGX)
+#define NIC_MAX_CPI_PER_LMAC 64 /* Max when CPI_ALG is IP diffserv */
+#define NIC_RSSI_PER_BGX (NIC_RSSI_COUNT / NIC_MAX_BGX)
+
+/* Tx scheduling */
+#define NIC_MAX_TL4 1024
+#define NIC_MAX_TL4_SHAPERS 256 /* 1 shaper for 4 TL4s */
+#define NIC_MAX_TL3 256
+#define NIC_MAX_TL3_SHAPERS 64 /* 1 shaper for 4 TL3s */
+#define NIC_MAX_TL2 64
+#define NIC_MAX_TL2_SHAPERS 2 /* 1 shaper for 32 TL2s */
+#define NIC_MAX_TL1 2
+
+/* TNS bypass mode */
+#define NIC_TL2_PER_BGX 32
+#define NIC_TL4_PER_BGX (NIC_MAX_TL4 / NIC_MAX_BGX)
+#define NIC_TL4_PER_LMAC (NIC_MAX_TL4 / NIC_CHANS_PER_INF)
+
+/* NIC VF Interrupts */
+#define NICVF_INTR_CQ 0
+#define NICVF_INTR_SQ 1
+#define NICVF_INTR_RBDR 2
+#define NICVF_INTR_PKT_DROP 3
+#define NICVF_INTR_TCP_TIMER 4
+#define NICVF_INTR_MBOX 5
+#define NICVF_INTR_QS_ERR 6
+
+#define NICVF_INTR_CQ_SHIFT 0
+#define NICVF_INTR_SQ_SHIFT 8
+#define NICVF_INTR_RBDR_SHIFT 16
+#define NICVF_INTR_PKT_DROP_SHIFT 20
+#define NICVF_INTR_TCP_TIMER_SHIFT 21
+#define NICVF_INTR_MBOX_SHIFT 22
+#define NICVF_INTR_QS_ERR_SHIFT 23
+
+#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
+#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
+#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
+#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT)
+#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT)
+#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT)
+#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT)
+
+/* MSI-X interrupts */
+#define NIC_PF_MSIX_VECTORS 10
+#define NIC_VF_MSIX_VECTORS 20
+
+#define NIC_PF_INTR_ID_ECC0_SBE 0
+#define NIC_PF_INTR_ID_ECC0_DBE 1
+#define NIC_PF_INTR_ID_ECC1_SBE 2
+#define NIC_PF_INTR_ID_ECC1_DBE 3
+#define NIC_PF_INTR_ID_ECC2_SBE 4
+#define NIC_PF_INTR_ID_ECC2_DBE 5
+#define NIC_PF_INTR_ID_ECC3_SBE 6
+#define NIC_PF_INTR_ID_ECC3_DBE 7
+#define NIC_PF_INTR_ID_MBOX0 8
+#define NIC_PF_INTR_ID_MBOX1 9
+
+/* Global timer for CQ timer thresh interrupts
+ * Calculated for SCLK of 700Mhz
+ * value written should be a 1/16th of what is expected
+ *
+ * 1 tick per 0.05usec = value of 2.2
+ * This 10% would be covered in CQ timer thresh value
+ */
+#define NICPF_CLK_PER_INT_TICK 2
+
+struct nicvf_cq_poll {
+ u8 cq_idx; /* Completion queue index */
+ struct napi_struct napi;
+};
+
+#define NIC_RSSI_COUNT 4096 /* Total no of RSS indices */
+#define NIC_MAX_RSS_HASH_BITS 8
+#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS)
+#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
+
+struct nicvf_rss_info {
+ bool enable;
+#define RSS_L2_EXTENDED_HASH_ENA BIT(0)
+#define RSS_IP_HASH_ENA BIT(1)
+#define RSS_TCP_HASH_ENA BIT(2)
+#define RSS_TCP_SYN_DIS BIT(3)
+#define RSS_UDP_HASH_ENA BIT(4)
+#define RSS_L4_EXTENDED_HASH_ENA BIT(5)
+#define RSS_ROCE_ENA BIT(6)
+#define RSS_L3_BI_DIRECTION_ENA BIT(7)
+#define RSS_L4_BI_DIRECTION_ENA BIT(8)
+ u64 cfg;
+ u8 hash_bits;
+ u16 rss_size;
+ u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
+ u64 key[RSS_HASH_KEY_SIZE];
+} ____cacheline_aligned_in_smp;
+
+enum rx_stats_reg_offset {
+ RX_OCTS = 0x0,
+ RX_UCAST = 0x1,
+ RX_BCAST = 0x2,
+ RX_MCAST = 0x3,
+ RX_RED = 0x4,
+ RX_RED_OCTS = 0x5,
+ RX_ORUN = 0x6,
+ RX_ORUN_OCTS = 0x7,
+ RX_FCS = 0x8,
+ RX_L2ERR = 0x9,
+ RX_DRP_BCAST = 0xa,
+ RX_DRP_MCAST = 0xb,
+ RX_DRP_L3BCAST = 0xc,
+ RX_DRP_L3MCAST = 0xd,
+ RX_STATS_ENUM_LAST,
+};
+
+enum tx_stats_reg_offset {
+ TX_OCTS = 0x0,
+ TX_UCAST = 0x1,
+ TX_BCAST = 0x2,
+ TX_MCAST = 0x3,
+ TX_DROP = 0x4,
+ TX_STATS_ENUM_LAST,
+};
+
+struct nicvf_hw_stats {
+ u64 rx_bytes_ok;
+ u64 rx_ucast_frames_ok;
+ u64 rx_bcast_frames_ok;
+ u64 rx_mcast_frames_ok;
+ u64 rx_fcs_errors;
+ u64 rx_l2_errors;
+ u64 rx_drop_red;
+ u64 rx_drop_red_bytes;
+ u64 rx_drop_overrun;
+ u64 rx_drop_overrun_bytes;
+ u64 rx_drop_bcast;
+ u64 rx_drop_mcast;
+ u64 rx_drop_l3_bcast;
+ u64 rx_drop_l3_mcast;
+ u64 tx_bytes_ok;
+ u64 tx_ucast_frames_ok;
+ u64 tx_bcast_frames_ok;
+ u64 tx_mcast_frames_ok;
+ u64 tx_drops;
+};
+
+struct nicvf_drv_stats {
+ /* Rx */
+ u64 rx_frames_ok;
+ u64 rx_frames_64;
+ u64 rx_frames_127;
+ u64 rx_frames_255;
+ u64 rx_frames_511;
+ u64 rx_frames_1023;
+ u64 rx_frames_1518;
+ u64 rx_frames_jumbo;
+ u64 rx_drops;
+ /* Tx */
+ u64 tx_frames_ok;
+ u64 tx_drops;
+ u64 tx_busy;
+ u64 tx_tso;
+};
+
+struct nicvf {
+ struct net_device *netdev;
+ struct pci_dev *pdev;
+ u8 vf_id;
+ u8 node;
+ u8 tns_mode;
+ u16 mtu;
+ struct queue_set *qs;
+ void __iomem *reg_base;
+ bool link_up;
+ u8 duplex;
+ u32 speed;
+ struct page *rb_page;
+ u32 rb_page_offset;
+ bool rb_alloc_fail;
+ bool rb_work_scheduled;
+ struct delayed_work rbdr_work;
+ struct tasklet_struct rbdr_task;
+ struct tasklet_struct qs_err_task;
+ struct tasklet_struct cq_task;
+ struct nicvf_cq_poll *napi[8];
+ struct nicvf_rss_info rss_info;
+ u8 cpi_alg;
+ /* Interrupt coalescing settings */
+ u32 cq_coalesce_usecs;
+
+ u32 msg_enable;
+ struct nicvf_hw_stats stats;
+ struct nicvf_drv_stats drv_stats;
+ struct bgx_stats bgx_stats;
+ struct work_struct reset_task;
+
+ /* MSI-X */
+ bool msix_enabled;
+ u8 num_vec;
+ struct msix_entry msix_entries[NIC_VF_MSIX_VECTORS];
+ char irq_name[NIC_VF_MSIX_VECTORS][20];
+ bool irq_allocated[NIC_VF_MSIX_VECTORS];
+
+ bool pf_ready_to_rcv_msg;
+ bool pf_acked;
+ bool pf_nacked;
+ bool bgx_stats_acked;
+} ____cacheline_aligned_in_smp;
+
+/* PF <--> VF Mailbox communication
+ * Eight 64bit registers are shared between PF and VF.
+ * Separate set for each VF.
+ * Writing '1' into last register mbx7 means end of message.
+ */
+
+/* PF <--> VF mailbox communication */
+#define NIC_PF_VF_MAILBOX_SIZE 2
+#define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */
+
+/* Mailbox message types */
+#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
+#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
+#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
+#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
+#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
+#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
+#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */
+#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
+#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
+#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
+#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
+#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
+#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
+#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
+#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
+#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
+#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
+#define NIC_MBOX_MSG_CFG_DONE 0x12 /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0x13 /* VF is being shutdown */
+
+struct nic_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 tns_mode;
+ u8 node_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Qset configuration */
+struct qs_cfg_msg {
+ u8 msg;
+ u8 num;
+ u64 cfg;
+};
+
+/* Receive queue configuration */
+struct rq_cfg_msg {
+ u8 msg;
+ u8 qs_num;
+ u8 rq_num;
+ u64 cfg;
+};
+
+/* Send queue configuration */
+struct sq_cfg_msg {
+ u8 msg;
+ u8 qs_num;
+ u8 sq_num;
+ u64 cfg;
+};
+
+/* Set VF's MAC address */
+struct set_mac_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 mac_addr[ETH_ALEN];
+};
+
+/* Set Maximum frame size */
+struct set_frs_msg {
+ u8 msg;
+ u8 vf_id;
+ u16 max_frs;
+};
+
+/* Set CPI algorithm type */
+struct cpi_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 rq_cnt;
+ u8 cpi_alg;
+};
+
+/* Get RSS table size */
+struct rss_sz_msg {
+ u8 msg;
+ u8 vf_id;
+ u16 ind_tbl_size;
+};
+
+/* Set RSS configuration */
+struct rss_cfg_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 hash_bits;
+ u8 tbl_len;
+ u8 tbl_offset;
+#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
+ u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
+};
+
+struct bgx_stats_msg {
+ u8 msg;
+ u8 vf_id;
+ u8 rx;
+ u8 idx;
+ u64 stats;
+};
+
+/* Physical interface link status */
+struct bgx_link_status {
+ u8 msg;
+ u8 link_up;
+ u8 duplex;
+ u32 speed;
+};
+
+/* 128 bit shared memory between PF and each VF */
+union nic_mbx {
+ struct { u8 msg; } msg;
+ struct nic_cfg_msg nic_cfg;
+ struct qs_cfg_msg qs;
+ struct rq_cfg_msg rq;
+ struct sq_cfg_msg sq;
+ struct set_mac_msg mac;
+ struct set_frs_msg frs;
+ struct cpi_cfg_msg cpi_cfg;
+ struct rss_sz_msg rss_size;
+ struct rss_cfg_msg rss_cfg;
+ struct bgx_stats_msg bgx_stats;
+ struct bgx_link_status link_status;
+};
+
+#define NIC_NODE_ID_MASK 0x03
+#define NIC_NODE_ID_SHIFT 44
+
+static inline int nic_get_node_id(struct pci_dev *pdev)
+{
+ u64 addr = pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM);
+ return ((addr >> NIC_NODE_ID_SHIFT) & NIC_NODE_ID_MASK);
+}
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues);
+int nicvf_open(struct net_device *netdev);
+int nicvf_stop(struct net_device *netdev);
+int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
+void nicvf_config_rss(struct nicvf *nic);
+void nicvf_set_rss_key(struct nicvf *nic);
+void nicvf_set_ethtool_ops(struct net_device *netdev);
+void nicvf_update_stats(struct nicvf *nic);
+void nicvf_update_lmac_stats(struct nicvf *nic);
+
+#endif /* NIC_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c
new file mode 100644
index 0000000..6e0c031
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic_main.c
@@ -0,0 +1,932 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/etherdevice.h>
+#include <linux/of.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-nic"
+#define DRV_VERSION "1.0"
+
+struct nicpf {
+ struct pci_dev *pdev;
+ u8 rev_id;
+ u8 node;
+ unsigned int flags;
+ u8 num_vf_en; /* No of VF enabled */
+ bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
+ void __iomem *reg_base; /* Register start address */
+ struct pkind_cfg pkind;
+#define NIC_SET_VF_LMAC_MAP(bgx, lmac) (((bgx & 0xF) << 4) | (lmac & 0xF))
+#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) ((map >> 4) & 0xF)
+#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) (map & 0xF)
+ u8 vf_lmac_map[MAX_LMAC];
+ struct delayed_work dwork;
+ struct workqueue_struct *check_link;
+ u8 link[MAX_LMAC];
+ u8 duplex[MAX_LMAC];
+ u32 speed[MAX_LMAC];
+ u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
+ u16 rss_ind_tbl_size;
+ bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
+
+ /* MSI-X */
+ bool msix_enabled;
+ u8 num_vec;
+ struct msix_entry msix_entries[NIC_PF_MSIX_VECTORS];
+ bool irq_allocated[NIC_PF_MSIX_VECTORS];
+};
+
+/* Supported devices */
+static const struct pci_device_id nic_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nic_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operation which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
+{
+ writeq_relaxed(val, nic->reg_base + offset);
+}
+
+static u64 nic_reg_read(struct nicpf *nic, u64 offset)
+{
+ return readq_relaxed(nic->reg_base + offset);
+}
+
+/* PF -> VF mailbox communication APIs */
+static void nic_enable_mbx_intr(struct nicpf *nic)
+{
+ /* Enable mailbox interrupt for all 128 VFs */
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull);
+ nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull);
+}
+
+static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
+{
+ nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
+}
+
+static u64 nic_get_mbx_addr(int vf)
+{
+ return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
+}
+
+/* Send a mailbox message to VF
+ * @vf: vf to which this message to be sent
+ * @mbx: Message to be sent
+ */
+static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
+{
+ void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
+ u64 *msg = (u64 *)mbx;
+
+ /* In first revision HW, mbox interrupt is triggered
+ * when PF writes to MBOX(1), in next revisions when
+ * PF writes to MBOX(0)
+ */
+ if (nic->rev_id == 0) {
+ /* see the comment for nic_reg_write()/nic_reg_read()
+ * functions above
+ */
+ writeq_relaxed(msg[0], mbx_addr);
+ writeq_relaxed(msg[1], mbx_addr + 8);
+ } else {
+ writeq_relaxed(msg[1], mbx_addr + 8);
+ writeq_relaxed(msg[0], mbx_addr);
+ }
+}
+
+/* Responds to VF's READY message with VF's
+ * ID, node, MAC address, etc.
+ * @vf: VF which sent READY message
+ */
+static void nic_mbx_send_ready(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+ int bgx_idx, lmac;
+ const char *mac;
+
+ mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
+ mbx.nic_cfg.vf_id = vf;
+
+ mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+
+ mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
+ if (mac)
+ ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
+
+ mbx.nic_cfg.node_id = nic->node;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* ACKs VF's mailbox message
+ * @vf: VF to which ACK to be sent
+ */
+static void nic_mbx_send_ack(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_ACK;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* NACKs VF's mailbox message when PF is not able to
+ * complete the action
+ * @vf: VF to which ACK to be sent
+ */
+static void nic_mbx_send_nack(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_NACK;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Flush all in flight receive packets to memory and
+ * bring down an active RQ
+ */
+static int nic_rcv_queue_sw_sync(struct nicpf *nic)
+{
+ u16 timeout = ~0x00;
+
+ nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
+ /* Wait till sync cycle is finished */
+ while (timeout) {
+ if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
+ break;
+ timeout--;
+ }
+ nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
+ if (!timeout) {
+ dev_err(&nic->pdev->dev, "Receive queue software sync failed");
+ return 1;
+ }
+ return 0;
+}
+
+/* Get BGX Rx/Tx stats and respond to VF's request */
+static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
+{
+ int bgx_idx, lmac;
+ union nic_mbx mbx = {};
+
+ bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
+
+ mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+ mbx.bgx_stats.vf_id = bgx->vf_id;
+ mbx.bgx_stats.rx = bgx->rx;
+ mbx.bgx_stats.idx = bgx->idx;
+ if (bgx->rx)
+ mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
+ lmac, bgx->idx);
+ else
+ mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
+ lmac, bgx->idx);
+ nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
+}
+
+/* Update hardware min/max frame size */
+static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
+{
+ if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
+ dev_err(&nic->pdev->dev,
+ "Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
+ vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
+ return 1;
+ }
+ new_frs += ETH_HLEN;
+ if (new_frs <= nic->pkind.maxlen)
+ return 0;
+
+ nic->pkind.maxlen = new_frs;
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
+ return 0;
+}
+
+/* Set minimum transmit packet size */
+static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
+{
+ int lmac;
+ u64 lmac_cfg;
+
+ /* Max value that can be set is 60 */
+ if (size > 60)
+ size = 60;
+
+ for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
+ lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
+ lmac_cfg &= ~(0xF << 2);
+ lmac_cfg |= ((size / 4) << 2);
+ nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
+ }
+}
+
+/* Function to check number of LMACs present and set VF::LMAC mapping.
+ * Mapping will be used while initializing channels.
+ */
+static void nic_set_lmac_vf_mapping(struct nicpf *nic)
+{
+ unsigned bgx_map = bgx_get_map(nic->node);
+ int bgx, next_bgx_lmac = 0;
+ int lmac, lmac_cnt = 0;
+ u64 lmac_credit;
+
+ nic->num_vf_en = 0;
+
+ for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
+ if (!(bgx_map & (1 << bgx)))
+ continue;
+ lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
+ for (lmac = 0; lmac < lmac_cnt; lmac++)
+ nic->vf_lmac_map[next_bgx_lmac++] =
+ NIC_SET_VF_LMAC_MAP(bgx, lmac);
+ nic->num_vf_en += lmac_cnt;
+
+ /* Program LMAC credits */
+ lmac_credit = (1ull << 1); /* channel credit enable */
+ lmac_credit |= (0x1ff << 2); /* Max outstanding pkt count */
+ /* 48KB BGX Tx buffer size, each unit is of size 16bytes */
+ lmac_credit |= (((((48 * 1024) / lmac_cnt) -
+ NIC_HW_MAX_FRS) / 16) << 12);
+ lmac = bgx * MAX_LMAC_PER_BGX;
+ for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
+ nic_reg_write(nic,
+ NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
+ lmac_credit);
+ }
+}
+
+#define BGX0_BLOCK 8
+#define BGX1_BLOCK 9
+
+static void nic_init_hw(struct nicpf *nic)
+{
+ int i;
+
+ /* Reset NIC, in case the driver is repeatedly inserted and removed */
+ nic_reg_write(nic, NIC_PF_SOFT_RESET, 1);
+
+ /* Enable NIC HW block */
+ nic_reg_write(nic, NIC_PF_CFG, 0x3);
+
+ /* Enable backpressure */
+ nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
+
+ /* Disable TNS mode on both interfaces */
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
+ (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
+ (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
+ (1ULL << 63) | BGX0_BLOCK);
+ nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
+ (1ULL << 63) | BGX1_BLOCK);
+
+ /* PKIND configuration */
+ nic->pkind.minlen = 0;
+ nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
+ nic->pkind.lenerr_en = 1;
+ nic->pkind.rx_hdr = 0;
+ nic->pkind.hdr_sl = 0;
+
+ for (i = 0; i < NIC_MAX_PKIND; i++)
+ nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
+ *(u64 *)&nic->pkind);
+
+ nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);
+
+ /* Timer config */
+ nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
+}
+
+/* Channel parse index configuration */
+static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
+{
+ u32 vnic, bgx, lmac, chan;
+ u32 padd, cpi_count = 0;
+ u64 cpi_base, cpi, rssi_base, rssi;
+ u8 qset, rq_idx = 0;
+
+ vnic = cfg->vf_id;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+
+ chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
+ cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
+ rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);
+
+ /* Rx channel configuration */
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
+ (1ull << 63) | (vnic << 0));
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
+ ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));
+
+ if (cfg->cpi_alg == CPI_ALG_NONE)
+ cpi_count = 1;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
+ cpi_count = 8;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
+ cpi_count = 16;
+ else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
+ cpi_count = NIC_MAX_CPI_PER_LMAC;
+
+ /* RSS Qset, Qidx mapping */
+ qset = cfg->vf_id;
+ rssi = rssi_base;
+ for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
+ nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+ (qset << 3) | rq_idx);
+ rq_idx++;
+ }
+
+ rssi = 0;
+ cpi = cpi_base;
+ for (; cpi < (cpi_base + cpi_count); cpi++) {
+ /* Determine port to channel adder */
+ if (cfg->cpi_alg != CPI_ALG_DIFF)
+ padd = cpi % cpi_count;
+ else
+ padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
+
+ /* Leave RSS_SIZE as '0' to disable RSS */
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
+ (vnic << 24) | (padd << 16) | (rssi_base + rssi));
+
+ if ((rssi + 1) >= cfg->rq_cnt)
+ continue;
+
+ if (cfg->cpi_alg == CPI_ALG_VLAN)
+ rssi++;
+ else if (cfg->cpi_alg == CPI_ALG_VLAN16)
+ rssi = ((cpi - cpi_base) & 0xe) >> 1;
+ else if (cfg->cpi_alg == CPI_ALG_DIFF)
+ rssi = ((cpi - cpi_base) & 0x38) >> 3;
+ }
+ nic->cpi_base[cfg->vf_id] = cpi_base;
+}
+
+/* Responds to VF with its RSS indirection table size */
+static void nic_send_rss_size(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+ u64 *msg;
+
+ msg = (u64 *)&mbx;
+
+ mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+ mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+}
+
+/* Receive side scaling configuration
+ * configure:
+ * - RSS index
+ * - indir table i.e hash::RQ mapping
+ * - no of hash bits to consider
+ */
+static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
+{
+ u8 qset, idx = 0;
+ u64 cpi_cfg, cpi_base, rssi_base, rssi;
+
+ cpi_base = nic->cpi_base[cfg->vf_id];
+ cpi_cfg = nic_reg_read(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3));
+ rssi_base = (cpi_cfg & 0x0FFF) + cfg->tbl_offset;
+
+ rssi = rssi_base;
+ qset = cfg->vf_id;
+
+ for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
+ nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
+ (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
+ idx++;
+ }
+
+ cpi_cfg &= ~(0xFULL << 20);
+ cpi_cfg |= (cfg->hash_bits << 20);
+ nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi_base << 3), cpi_cfg);
+}
+
+/* 4 level transmit side scheduler configuration
+ * for TNS bypass mode
+ *
+ * Sample configuration for SQ0
+ * VNIC0-SQ0 -> TL4(0) -> TL3[0] -> TL2[0] -> TL1[0] -> BGX0
+ * VNIC1-SQ0 -> TL4(8) -> TL3[2] -> TL2[0] -> TL1[0] -> BGX0
+ * VNIC2-SQ0 -> TL4(16) -> TL3[4] -> TL2[1] -> TL1[0] -> BGX0
+ * VNIC3-SQ0 -> TL4(24) -> TL3[6] -> TL2[1] -> TL1[0] -> BGX0
+ * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
+ * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
+ * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
+ */
+static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic, u8 sq_idx)
+{
+ u32 bgx, lmac, chan;
+ u32 tl2, tl3, tl4;
+ u32 rr_quantum;
+
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
+ /* 24 bytes for FCS, IPG and preamble */
+ rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
+
+ tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
+ tl4 += sq_idx;
+ tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
+ nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
+ ((u64)vnic << NIC_QS_ID_SHIFT) |
+ ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
+ nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
+ ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
+
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
+ chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
+ nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
+ /* Enable backpressure on the channel */
+ nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
+
+ tl2 = tl3 >> 2;
+ nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
+ nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
+ /* No priorities as of now */
+ nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
+}
+
+/* Handles a mailbox message sent by a VF */
+static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
+{
+ union nic_mbx mbx = {};
+ u64 *mbx_data;
+ u64 mbx_addr;
+ u64 reg_addr;
+ int bgx, lmac;
+ int i;
+ int ret = 0;
+
+ nic->mbx_lock[vf] = true;
+
+ mbx_addr = nic_get_mbx_addr(vf);
+ mbx_data = (u64 *)&mbx;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nic_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(u64);
+ }
+
+ dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n",
+ __func__, mbx.msg.msg, vf);
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic_mbx_send_ready(nic, vf);
+ nic->link[vf] = 0;
+ nic->duplex[vf] = 0;
+ nic->speed[vf] = 0;
+ ret = 1;
+ break;
+ case NIC_MBOX_MSG_QS_CFG:
+ reg_addr = NIC_PF_QSET_0_127_CFG |
+ (mbx.qs.num << NIC_QS_ID_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.qs.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_BP_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_RQ_SW_SYNC:
+ ret = nic_rcv_queue_sw_sync(nic);
+ break;
+ case NIC_MBOX_MSG_RQ_DROP_CFG:
+ reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
+ (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.rq.cfg);
+ break;
+ case NIC_MBOX_MSG_SQ_CFG:
+ reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
+ (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
+ (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
+ nic_reg_write(nic, reg_addr, mbx.sq.cfg);
+ nic_tx_channel_cfg(nic, mbx.qs.num, mbx.sq.sq_num);
+ break;
+ case NIC_MBOX_MSG_SET_MAC:
+ lmac = mbx.mac.vf_id;
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
+ bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
+ break;
+ case NIC_MBOX_MSG_SET_MAX_FRS:
+ ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
+ mbx.frs.vf_id);
+ break;
+ case NIC_MBOX_MSG_CPI_CFG:
+ nic_config_cpi(nic, &mbx.cpi_cfg);
+ break;
+ case NIC_MBOX_MSG_RSS_SIZE:
+ nic_send_rss_size(nic, vf);
+ goto unlock;
+ case NIC_MBOX_MSG_RSS_CFG:
+ case NIC_MBOX_MSG_RSS_CFG_CONT:
+ nic_config_rss(nic, &mbx.rss_cfg);
+ break;
+ case NIC_MBOX_MSG_CFG_DONE:
+ /* Last message of VF config msg sequence */
+ nic->vf_enabled[vf] = true;
+ goto unlock;
+ case NIC_MBOX_MSG_SHUTDOWN:
+ /* First msg in VF teardown sequence */
+ nic->vf_enabled[vf] = false;
+ break;
+ case NIC_MBOX_MSG_BGX_STATS:
+ nic_get_bgx_stats(nic, &mbx.bgx_stats);
+ goto unlock;
+ default:
+ dev_err(&nic->pdev->dev,
+ "Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
+ break;
+ }
+
+ if (!ret)
+ nic_mbx_send_ack(nic, vf);
+ else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
+ nic_mbx_send_nack(nic, vf);
+unlock:
+ nic->mbx_lock[vf] = false;
+}
+
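+/* Each NIC_PF_MAILBOX_INT register carries one interrupt bit per VF,
+ * so register 0 covers VFs 0-63 and register 1 covers VFs 64-127.
+ */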
+static void nic_mbx_intr_handler(struct nicpf *nic, int mbx)
+{
+ u64 intr;
+ u8 vf, vf_per_mbx_reg = 64;
+
+ intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
+ dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
+ for (vf = 0; vf < vf_per_mbx_reg; vf++) {
+ if (intr & (1ULL << vf)) {
+ dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
+ vf + (mbx * vf_per_mbx_reg));
+			if ((vf + (mbx * vf_per_mbx_reg)) >= nic->num_vf_en)
+ break;
+ nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
+ nic_clear_mbx_intr(nic, vf, mbx);
+ }
+ }
+}
+
+static irqreturn_t nic_mbx0_intr_handler(int irq, void *nic_irq)
+{
+ struct nicpf *nic = (struct nicpf *)nic_irq;
+
+ nic_mbx_intr_handler(nic, 0);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nic_mbx1_intr_handler(int irq, void *nic_irq)
+{
+ struct nicpf *nic = (struct nicpf *)nic_irq;
+
+ nic_mbx_intr_handler(nic, 1);
+
+ return IRQ_HANDLED;
+}
+
+static int nic_enable_msix(struct nicpf *nic)
+{
+ int i, ret;
+
+ nic->num_vec = NIC_PF_MSIX_VECTORS;
+
+ for (i = 0; i < nic->num_vec; i++)
+ nic->msix_entries[i].entry = i;
+
+ ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
+ if (ret) {
+ dev_err(&nic->pdev->dev,
+ "Request for #%d msix vectors failed\n",
+ nic->num_vec);
+ return ret;
+ }
+
+ nic->msix_enabled = 1;
+ return 0;
+}
+
+static void nic_disable_msix(struct nicpf *nic)
+{
+ if (nic->msix_enabled) {
+ pci_disable_msix(nic->pdev);
+ nic->msix_enabled = 0;
+ nic->num_vec = 0;
+ }
+}
+
+static void nic_free_all_interrupts(struct nicpf *nic)
+{
+ int irq;
+
+ for (irq = 0; irq < nic->num_vec; irq++) {
+ if (nic->irq_allocated[irq])
+ free_irq(nic->msix_entries[irq].vector, nic);
+ nic->irq_allocated[irq] = false;
+ }
+}
+
+static int nic_register_interrupts(struct nicpf *nic)
+{
+ int ret;
+
+ /* Enable MSI-X */
+ ret = nic_enable_msix(nic);
+ if (ret)
+ return ret;
+
+ /* Register mailbox interrupt handlers */
+ ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector,
+ nic_mbx0_intr_handler, 0, "NIC Mbox0", nic);
+ if (ret)
+ goto fail;
+
+ nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true;
+
+ ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector,
+ nic_mbx1_intr_handler, 0, "NIC Mbox1", nic);
+ if (ret)
+ goto fail;
+
+ nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true;
+
+ /* Enable mailbox interrupt */
+ nic_enable_mbx_intr(nic);
+ return 0;
+
+fail:
+ dev_err(&nic->pdev->dev, "Request irq failed\n");
+ nic_free_all_interrupts(nic);
+ return ret;
+}
+
+static void nic_unregister_interrupts(struct nicpf *nic)
+{
+ nic_free_all_interrupts(nic);
+ nic_disable_msix(nic);
+}
+
+static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
+{
+ int pos = 0;
+ int err;
+ u16 total_vf_cnt;
+
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
+ if (!pos) {
+ dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
+ return -ENODEV;
+ }
+
+ pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
+ if (total_vf_cnt < nic->num_vf_en)
+ nic->num_vf_en = total_vf_cnt;
+
+ if (!total_vf_cnt)
+ return 0;
+
+ err = pci_enable_sriov(pdev, nic->num_vf_en);
+ if (err) {
+ dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
+ nic->num_vf_en);
+ nic->num_vf_en = 0;
+ return err;
+ }
+
+	dev_info(&pdev->dev, "SRIOV enabled, %d VFs available\n",
+		 nic->num_vf_en);
+
+ nic->flags |= NIC_SRIOV_ENABLED;
+ return 0;
+}
+
+/* Poll for BGX LMAC link status and inform the corresponding VF
+ * on any change. Valid only when the internal L2 switch is not
+ * present; otherwise the VF link is always treated as up.
+ */
+static void nic_poll_for_link(struct work_struct *work)
+{
+ union nic_mbx mbx = {};
+ struct nicpf *nic;
+ struct bgx_link_status link;
+ u8 vf, bgx, lmac;
+
+ nic = container_of(work, struct nicpf, dwork.work);
+
+ mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;
+
+ for (vf = 0; vf < nic->num_vf_en; vf++) {
+ /* Poll only if VF is UP */
+ if (!nic->vf_enabled[vf])
+ continue;
+
+ /* Get BGX, LMAC indices for the VF */
+ bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
+ /* Get interface link status */
+ bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);
+
+ /* Inform VF only if link status changed */
+ if (nic->link[vf] == link.link_up)
+ continue;
+
+ if (!nic->mbx_lock[vf]) {
+ nic->link[vf] = link.link_up;
+ nic->duplex[vf] = link.duplex;
+ nic->speed[vf] = link.speed;
+
+			/* Send an mbox message to VF with current link status */
+ mbx.link_status.link_up = link.link_up;
+ mbx.link_status.duplex = link.duplex;
+ mbx.link_status.speed = link.speed;
+ nic_send_msg_to_vf(nic, vf, &mbx);
+ }
+ }
+ queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
+}
+
+static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct nicpf *nic;
+ int err;
+
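+	/* The PF/VF mailbox is a pair of 64-bit registers, so every
+	 * mailbox message must fit in 16 bytes.
+	 */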
+ BUILD_BUG_ON(sizeof(union nic_mbx) > 16);
+
+ nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
+ if (!nic)
+ return -ENOMEM;
+
+ pci_set_drvdata(pdev, nic);
+
+ nic->pdev = pdev;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
+ goto err_release_regions;
+ }
+
+ /* MAP PF's configuration registers */
+ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!nic->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_read_config_byte(pdev, PCI_REVISION_ID, &nic->rev_id);
+
+ nic->node = nic_get_node_id(pdev);
+
+ nic_set_lmac_vf_mapping(nic);
+
+ /* Initialize hardware */
+ nic_init_hw(nic);
+
+ /* Set RSS TBL size for each VF */
+ nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
+
+ /* Register interrupts */
+ err = nic_register_interrupts(nic);
+ if (err)
+ goto err_release_regions;
+
+ /* Configure SRIOV */
+ err = nic_sriov_init(pdev, nic);
+ if (err)
+ goto err_unregister_interrupts;
+
+ /* Register a physical link status poll fn() */
+ nic->check_link = alloc_workqueue("check_link_status",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ if (!nic->check_link) {
+ err = -ENOMEM;
+ goto err_disable_sriov;
+ }
+
+ INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
+ queue_delayed_work(nic->check_link, &nic->dwork, 0);
+
+ return 0;
+
+err_disable_sriov:
+ if (nic->flags & NIC_SRIOV_ENABLED)
+ pci_disable_sriov(pdev);
+err_unregister_interrupts:
+ nic_unregister_interrupts(nic);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void nic_remove(struct pci_dev *pdev)
+{
+ struct nicpf *nic = pci_get_drvdata(pdev);
+
+ if (nic->flags & NIC_SRIOV_ENABLED)
+ pci_disable_sriov(pdev);
+
+ if (nic->check_link) {
+		/* Destroy work queue */
+ cancel_delayed_work(&nic->dwork);
+ flush_workqueue(nic->check_link);
+ destroy_workqueue(nic->check_link);
+ }
+
+ nic_unregister_interrupts(nic);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver nic_driver = {
+ .name = DRV_NAME,
+ .id_table = nic_id_table,
+ .probe = nic_probe,
+ .remove = nic_remove,
+};
+
+static int __init nic_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&nic_driver);
+}
+
+static void __exit nic_cleanup_module(void)
+{
+ pci_unregister_driver(&nic_driver);
+}
+
+module_init(nic_init_module);
+module_exit(nic_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nic_reg.h b/drivers/net/ethernet/cavium/thunder/nic_reg.h
new file mode 100644
index 0000000..58197bb
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nic_reg.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NIC_REG_H
+#define NIC_REG_H
+
+#define NIC_PF_REG_COUNT 29573
+#define NIC_VF_REG_COUNT 249
+
+/* Physical function register offsets */
+#define NIC_PF_CFG (0x0000)
+#define NIC_PF_STATUS (0x0010)
+#define NIC_PF_INTR_TIMER_CFG (0x0030)
+#define NIC_PF_BIST_STATUS (0x0040)
+#define NIC_PF_SOFT_RESET (0x0050)
+#define NIC_PF_TCP_TIMER (0x0060)
+#define NIC_PF_BP_CFG (0x0080)
+#define NIC_PF_RRM_CFG (0x0088)
+#define NIC_PF_CQM_CF (0x00A0)
+#define NIC_PF_CNM_CF (0x00A8)
+#define NIC_PF_CNM_STATUS (0x00B0)
+#define NIC_PF_CQ_AVG_CFG (0x00C0)
+#define NIC_PF_RRM_AVG_CFG (0x00C8)
+#define NIC_PF_INTF_0_1_SEND_CFG (0x0200)
+#define NIC_PF_INTF_0_1_BP_CFG (0x0208)
+#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210)
+#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220)
+#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240)
+#define NIC_PF_MAILBOX_INT (0x0410)
+#define NIC_PF_MAILBOX_INT_W1S (0x0430)
+#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
+#define NIC_PF_MAILBOX_ENA_W1S (0x0470)
+#define NIC_PF_RX_ETYPE_0_7 (0x0500)
+#define NIC_PF_PKIND_0_15_CFG (0x0600)
+#define NIC_PF_ECC0_FLIP0 (0x1000)
+#define NIC_PF_ECC1_FLIP0 (0x1008)
+#define NIC_PF_ECC2_FLIP0 (0x1010)
+#define NIC_PF_ECC3_FLIP0 (0x1018)
+#define NIC_PF_ECC0_FLIP1 (0x1080)
+#define NIC_PF_ECC1_FLIP1 (0x1088)
+#define NIC_PF_ECC2_FLIP1 (0x1090)
+#define NIC_PF_ECC3_FLIP1 (0x1098)
+#define NIC_PF_ECC0_CDIS (0x1100)
+#define NIC_PF_ECC1_CDIS (0x1108)
+#define NIC_PF_ECC2_CDIS (0x1110)
+#define NIC_PF_ECC3_CDIS (0x1118)
+#define NIC_PF_BIST0_STATUS (0x1280)
+#define NIC_PF_BIST1_STATUS (0x1288)
+#define NIC_PF_BIST2_STATUS (0x1290)
+#define NIC_PF_BIST3_STATUS (0x1298)
+#define NIC_PF_ECC0_SBE_INT (0x2000)
+#define NIC_PF_ECC0_SBE_INT_W1S (0x2008)
+#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010)
+#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018)
+#define NIC_PF_ECC0_DBE_INT (0x2100)
+#define NIC_PF_ECC0_DBE_INT_W1S (0x2108)
+#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110)
+#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118)
+#define NIC_PF_ECC1_SBE_INT (0x2200)
+#define NIC_PF_ECC1_SBE_INT_W1S (0x2208)
+#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210)
+#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218)
+#define NIC_PF_ECC1_DBE_INT (0x2300)
+#define NIC_PF_ECC1_DBE_INT_W1S (0x2308)
+#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310)
+#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318)
+#define NIC_PF_ECC2_SBE_INT (0x2400)
+#define NIC_PF_ECC2_SBE_INT_W1S (0x2408)
+#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410)
+#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418)
+#define NIC_PF_ECC2_DBE_INT (0x2500)
+#define NIC_PF_ECC2_DBE_INT_W1S (0x2508)
+#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510)
+#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518)
+#define NIC_PF_ECC3_SBE_INT (0x2600)
+#define NIC_PF_ECC3_SBE_INT_W1S (0x2608)
+#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610)
+#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618)
+#define NIC_PF_ECC3_DBE_INT (0x2700)
+#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
+#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
+#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)
+#define NIC_PF_CPI_0_2047_CFG (0x200000)
+#define NIC_PF_RSSI_0_4097_RQ (0x220000)
+#define NIC_PF_LMAC_0_7_CFG (0x240000)
+#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
+#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
+#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
+#define NIC_PF_CHAN_0_255_RX_CFG (0x420000)
+#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000)
+#define NIC_PF_CHAN_0_255_CREDIT (0x460000)
+#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000)
+#define NIC_PF_SW_SYNC_RX (0x490000)
+#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
+#define NIC_PF_TL2_0_63_CFG (0x500000)
+#define NIC_PF_TL2_0_63_PRI (0x520000)
+#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
+#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
+#define NIC_PF_TL3_0_255_CFG (0x600000)
+#define NIC_PF_TL3_0_255_CHAN (0x620000)
+#define NIC_PF_TL3_0_255_PIR (0x640000)
+#define NIC_PF_TL3_0_255_SW_XOFF (0x660000)
+#define NIC_PF_TL3_0_255_CNM_RATE (0x680000)
+#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000)
+#define NIC_PF_TL4A_0_255_CFG (0x6F0000)
+#define NIC_PF_TL4_0_1023_CFG (0x800000)
+#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000)
+#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000)
+#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000)
+#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000)
+#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030)
+#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000)
+#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100)
+#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000)
+#define NIC_PF_QSET_0_127_CFG (0x20010000)
+#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400)
+#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420)
+#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500)
+#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600)
+#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00)
+#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08)
+#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00)
+
+#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000)
+#define NIC_PF_MSIX_VEC_0_CTL (0x000008)
+#define NIC_PF_MSIX_PBA_0 (0x0F0000)
+
+/* Virtual function register offsets */
+#define NIC_VNIC_CFG (0x000020)
+#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
+#define NIC_VF_INT (0x000200)
+#define NIC_VF_INT_W1S (0x000220)
+#define NIC_VF_ENA_W1C (0x000240)
+#define NIC_VF_ENA_W1S (0x000260)
+
+#define NIC_VNIC_RSS_CFG (0x0020E0)
+#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
+#define NIC_VNIC_TX_STAT_0_4 (0x004000)
+#define NIC_VNIC_RX_STAT_0_13 (0x004100)
+#define NIC_QSET_RQ_GEN_CFG (0x010010)
+
+#define NIC_QSET_CQ_0_7_CFG (0x010400)
+#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
+#define NIC_QSET_CQ_0_7_THRESH (0x010410)
+#define NIC_QSET_CQ_0_7_BASE (0x010420)
+#define NIC_QSET_CQ_0_7_HEAD (0x010428)
+#define NIC_QSET_CQ_0_7_TAIL (0x010430)
+#define NIC_QSET_CQ_0_7_DOOR (0x010438)
+#define NIC_QSET_CQ_0_7_STATUS (0x010440)
+#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
+#define NIC_QSET_CQ_0_7_DEBUG (0x010450)
+
+#define NIC_QSET_RQ_0_7_CFG (0x010600)
+#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700)
+
+#define NIC_QSET_SQ_0_7_CFG (0x010800)
+#define NIC_QSET_SQ_0_7_THRESH (0x010810)
+#define NIC_QSET_SQ_0_7_BASE (0x010820)
+#define NIC_QSET_SQ_0_7_HEAD (0x010828)
+#define NIC_QSET_SQ_0_7_TAIL (0x010830)
+#define NIC_QSET_SQ_0_7_DOOR (0x010838)
+#define NIC_QSET_SQ_0_7_STATUS (0x010840)
+#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
+#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
+#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)
+
+#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
+#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
+#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
+#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
+#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
+#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
+#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
+#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
+#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50)
+
+#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000)
+#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008)
+#define NIC_VF_MSIX_PBA (0x0F0000)
+
+/* Offsets within registers */
+#define NIC_MSIX_VEC_SHIFT 4
+#define NIC_Q_NUM_SHIFT 18
+#define NIC_QS_ID_SHIFT 21
+#define NIC_VF_NUM_SHIFT 21
+
+/* Port kind configuration register */
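+/* (the bitfields below cover all 64 bits: 22 + 5 + 3 + 1 + 1 + 16 + 16) */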
+struct pkind_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_42_63:22;
+ u64 hdr_sl:5; /* Header skip length */
+ u64 rx_hdr:3; /* TNS Receive header present */
+ u64 lenerr_en:1;/* L2 length error check enable */
+ u64 reserved_32_32:1;
+ u64 maxlen:16; /* Max frame size */
+ u64 minlen:16; /* Min frame size */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 minlen:16;
+ u64 maxlen:16;
+ u64 reserved_32_32:1;
+ u64 lenerr_en:1;
+ u64 rx_hdr:3;
+ u64 hdr_sl:5;
+ u64 reserved_42_63:22;
+#endif
+};
+
+#endif /* NIC_REG_H */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
new file mode 100644
index 0000000..16bd2d7
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -0,0 +1,600 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+/* ETHTOOL Support for VNIC_VF Device */
+
+#include <linux/pci.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "q_struct.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-nicvf"
+#define DRV_VERSION "1.0"
+
+struct nicvf_stat {
+ char name[ETH_GSTRING_LEN];
+ unsigned int index;
+};
+
+#define NICVF_HW_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
+}
+
+#define NICVF_DRV_STAT(stat) { \
+ .name = #stat, \
+ .index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
+}
+
+static const struct nicvf_stat nicvf_hw_stats[] = {
+ NICVF_HW_STAT(rx_bytes_ok),
+ NICVF_HW_STAT(rx_ucast_frames_ok),
+ NICVF_HW_STAT(rx_bcast_frames_ok),
+ NICVF_HW_STAT(rx_mcast_frames_ok),
+ NICVF_HW_STAT(rx_fcs_errors),
+ NICVF_HW_STAT(rx_l2_errors),
+ NICVF_HW_STAT(rx_drop_red),
+ NICVF_HW_STAT(rx_drop_red_bytes),
+ NICVF_HW_STAT(rx_drop_overrun),
+ NICVF_HW_STAT(rx_drop_overrun_bytes),
+ NICVF_HW_STAT(rx_drop_bcast),
+ NICVF_HW_STAT(rx_drop_mcast),
+ NICVF_HW_STAT(rx_drop_l3_bcast),
+ NICVF_HW_STAT(rx_drop_l3_mcast),
+ NICVF_HW_STAT(tx_bytes_ok),
+ NICVF_HW_STAT(tx_ucast_frames_ok),
+ NICVF_HW_STAT(tx_bcast_frames_ok),
+ NICVF_HW_STAT(tx_mcast_frames_ok),
+};
+
+static const struct nicvf_stat nicvf_drv_stats[] = {
+ NICVF_DRV_STAT(rx_frames_ok),
+ NICVF_DRV_STAT(rx_frames_64),
+ NICVF_DRV_STAT(rx_frames_127),
+ NICVF_DRV_STAT(rx_frames_255),
+ NICVF_DRV_STAT(rx_frames_511),
+ NICVF_DRV_STAT(rx_frames_1023),
+ NICVF_DRV_STAT(rx_frames_1518),
+ NICVF_DRV_STAT(rx_frames_jumbo),
+ NICVF_DRV_STAT(rx_drops),
+ NICVF_DRV_STAT(tx_frames_ok),
+ NICVF_DRV_STAT(tx_busy),
+ NICVF_DRV_STAT(tx_tso),
+ NICVF_DRV_STAT(tx_drops),
+};
+
+static const struct nicvf_stat nicvf_queue_stats[] = {
+ { "bytes", 0 },
+ { "frames", 1 },
+};
+
+static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
+static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
+static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
+
+static int nicvf_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ cmd->supported = 0;
+ cmd->transceiver = XCVR_EXTERNAL;
+ if (nic->speed <= 1000) {
+ cmd->port = PORT_MII;
+ cmd->autoneg = AUTONEG_ENABLE;
+ } else {
+ cmd->port = PORT_FIBRE;
+ cmd->autoneg = AUTONEG_DISABLE;
+ }
+ cmd->duplex = nic->duplex;
+ ethtool_cmd_speed_set(cmd, nic->speed);
+
+ return 0;
+}
+
+static void nicvf_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *info)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
+ strlcpy(info->version, DRV_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
+}
+
+static u32 nicvf_get_msglevel(struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ return nic->msg_enable;
+}
+
+static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ nic->msg_enable = lvl;
+}
+
+static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
+{
+ int stats, qidx;
+
+ if (sset != ETH_SS_STATS)
+ return;
+
+ for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
+ memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
+ memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(data, "rxq%d: %s", qidx,
+ nicvf_queue_stats[stats].name);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+ for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
+ sprintf(data, "txq%d: %s", qidx,
+ nicvf_queue_stats[stats].name);
+ data += ETH_GSTRING_LEN;
+ }
+ }
+
+ for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
+ sprintf(data, "bgx_rxstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+
+ for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
+ sprintf(data, "bgx_txstat%d: ", stats);
+ data += ETH_GSTRING_LEN;
+ }
+}
+
+static int nicvf_get_sset_count(struct net_device *netdev, int sset)
+{
+ if (sset != ETH_SS_STATS)
+ return -EINVAL;
+
+ return nicvf_n_hw_stats + nicvf_n_drv_stats +
+ (nicvf_n_queue_stats *
+ (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) +
+ BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
+}
+
+static void nicvf_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int stat, qidx;
+
+ nicvf_update_stats(nic);
+
+ /* Update LMAC stats */
+ nicvf_update_lmac_stats(nic);
+
+ for (stat = 0; stat < nicvf_n_hw_stats; stat++)
+ *(data++) = ((u64 *)&nic->stats)
+ [nicvf_hw_stats[stat].index];
+ for (stat = 0; stat < nicvf_n_drv_stats; stat++)
+ *(data++) = ((u64 *)&nic->drv_stats)
+ [nicvf_drv_stats[stat].index];
+
+ for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) {
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+
+ for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) {
+ for (stat = 0; stat < nicvf_n_queue_stats; stat++)
+ *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
+ [nicvf_queue_stats[stat].index];
+ }
+
+ for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
+ *(data++) = nic->bgx_stats.rx_stats[stat];
+ for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
+ *(data++) = nic->bgx_stats.tx_stats[stat];
+}
+
+static int nicvf_get_regs_len(struct net_device *dev)
+{
+ return sizeof(u64) * NIC_VF_REG_COUNT;
+}
+
+static void nicvf_get_regs(struct net_device *dev,
+ struct ethtool_regs *regs, void *reg)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ u64 *p = (u64 *)reg;
+ u64 reg_offset;
+ int mbox, key, stat, q;
+ int i = 0;
+
+ regs->version = 0;
+	memset(p, 0, NIC_VF_REG_COUNT * sizeof(u64));
+
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
+ /* Mailbox registers */
+ for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));
+
+ p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
+ p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+ for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
+ p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));
+
+ /* Tx/Rx statistics */
+ for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VNIC_TX_STAT_0_4 | (stat << 3));
+
+	for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
+ p[i++] = nicvf_reg_read(nic,
+ NIC_VNIC_RX_STAT_0_13 | (stat << 3));
+
+ p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);
+
+	/* All completion queues' registers */
+ for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
+ }
+
+	/* All receive queues' registers */
+ for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RQ_0_7_STAT_0_1, q);
+ reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+
+ for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CNM_CHG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
+ reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+
+ for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
+ p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_STATUS0, q);
+ p[i++] = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_STATUS1, q);
+ reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
+ p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
+ }
+}
+
+static int nicvf_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *cmd)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
+ return 0;
+}
+
+static void nicvf_get_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+
+ ring->rx_max_pending = MAX_RCV_BUF_COUNT;
+ ring->rx_pending = qs->rbdr_len;
+ ring->tx_max_pending = MAX_SND_QUEUE_LEN;
+ ring->tx_pending = qs->sq_len;
+}
+
+static int nicvf_get_rss_hash_opts(struct nicvf *nic,
+ struct ethtool_rxnfc *info)
+{
+ info->data = 0;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
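+		/* Fall through */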
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ info->data |= RXH_IP_SRC | RXH_IP_DST;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int nicvf_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *info, u32 *rules)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ int ret = -EOPNOTSUPP;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = nic->qs->rq_cnt;
+ ret = 0;
+ break;
+ case ETHTOOL_GRXFH:
+ return nicvf_get_rss_hash_opts(nic, info);
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int nicvf_set_rss_hash_opts(struct nicvf *nic,
+ struct ethtool_rxnfc *info)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);
+
+ if (!rss->enable)
+ netdev_err(nic->netdev,
+ "RSS is disabled, hash cannot be set\n");
+
+ netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
+ info->flow_type, info->data);
+
+ if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_UDP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_UDP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case SCTP_V4_FLOW:
+ case SCTP_V6_FLOW:
+ switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ rss_cfg |= (1ULL << RSS_HASH_L4ETC);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ rss_cfg = RSS_HASH_IP;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
+ return 0;
+}
+
+static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ switch (info->cmd) {
+ case ETHTOOL_SRXFH:
+ return nicvf_set_rss_hash_opts(nic, info);
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
+{
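+	/* RSS_HASH_KEY_SIZE counts 64-bit words (the key is written via
+	 * five NIC_VNIC_RSS_KEY registers), so convert to bytes here.
+	 */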
+ return RSS_HASH_KEY_SIZE * sizeof(u64);
+}
+
+static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ return nic->rss_info.rss_size;
+}
+
+static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
+ u8 *hfunc)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ indir[idx] = rss->ind_tbl[idx];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP;
+
+ return 0;
+}
+
+static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *hkey, u8 hfunc)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ if ((nic->qs->rq_cnt <= 1) || (nic->cpi_alg != CPI_ALG_NONE)) {
+ rss->enable = false;
+ rss->hash_bits = 0;
+ return -EIO;
+ }
+
+ /* We do not allow change in unsupported parameters */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ rss->enable = true;
+ if (indir) {
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] = indir[idx];
+ }
+
+ if (hkey) {
+ memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
+ nicvf_set_rss_key(nic);
+ }
+
+ nicvf_config_rss(nic);
+ return 0;
+}
+
+/* Get the number of queues the device supports and current queue count */
+static void nicvf_get_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ memset(channel, 0, sizeof(*channel));
+
+ channel->max_rx = MAX_RCV_QUEUES_PER_QS;
+ channel->max_tx = MAX_SND_QUEUES_PER_QS;
+
+ channel->rx_count = nic->qs->rq_cnt;
+ channel->tx_count = nic->qs->sq_cnt;
+}
+
+/* Set the number of Tx/Rx queues to be used */
+static int nicvf_set_channels(struct net_device *dev,
+ struct ethtool_channels *channel)
+{
+ struct nicvf *nic = netdev_priv(dev);
+ int err = 0;
+
+ if (!channel->rx_count || !channel->tx_count)
+ return -EINVAL;
+ if (channel->rx_count > MAX_RCV_QUEUES_PER_QS)
+ return -EINVAL;
+ if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
+ return -EINVAL;
+
+ nic->qs->rq_cnt = channel->rx_count;
+ nic->qs->sq_cnt = channel->tx_count;
+ nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
+
+ err = nicvf_set_real_num_queues(dev, nic->qs->sq_cnt, nic->qs->rq_cnt);
+ if (err)
+ return err;
+
+ if (!netif_running(dev))
+ return err;
+
+ nicvf_stop(dev);
+ nicvf_open(dev);
+	netdev_info(dev, "Set num Tx rings to %d, Rx rings to %d\n",
+		    nic->qs->sq_cnt, nic->qs->rq_cnt);
+
+ return err;
+}
+
+static const struct ethtool_ops nicvf_ethtool_ops = {
+ .get_settings = nicvf_get_settings,
+ .get_link = ethtool_op_get_link,
+ .get_drvinfo = nicvf_get_drvinfo,
+ .get_msglevel = nicvf_get_msglevel,
+ .set_msglevel = nicvf_set_msglevel,
+ .get_strings = nicvf_get_strings,
+ .get_sset_count = nicvf_get_sset_count,
+ .get_ethtool_stats = nicvf_get_ethtool_stats,
+ .get_regs_len = nicvf_get_regs_len,
+ .get_regs = nicvf_get_regs,
+ .get_coalesce = nicvf_get_coalesce,
+ .get_ringparam = nicvf_get_ringparam,
+ .get_rxnfc = nicvf_get_rxnfc,
+ .set_rxnfc = nicvf_set_rxnfc,
+ .get_rxfh_key_size = nicvf_get_rxfh_key_size,
+ .get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
+ .get_rxfh = nicvf_get_rxfh,
+ .set_rxfh = nicvf_set_rxfh,
+ .get_channels = nicvf_get_channels,
+ .set_channels = nicvf_set_channels,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
+void nicvf_set_ethtool_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &nicvf_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
new file mode 100644
index 0000000..02da802
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -0,0 +1,1331 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/log2.h>
+#include <linux/prefetch.h>
+#include <linux/irq.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "nicvf_queues.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-nicvf"
+#define DRV_VERSION "1.0"
+
+/* Supported devices */
+static const struct pci_device_id nicvf_id_table[] = {
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_THUNDER_PASS1_NIC_VF,
+ PCI_VENDOR_ID_CAVIUM, 0xA11E) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Sunil Goutham");
+MODULE_DESCRIPTION("Cavium Thunder NIC Virtual Function Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, nicvf_id_table);
+
+static int debug = 0x00;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "Debug message level bitmap");
+
+static int cpi_alg = CPI_ALG_NONE;
+module_param(cpi_alg, int, S_IRUGO);
+MODULE_PARM_DESC(cpi_alg,
+		 "CPI algorithm (0=none, 1=VLAN, 2=VLAN16, 3=IP Diffserv)");
+
+static inline void nicvf_set_rx_frame_cnt(struct nicvf *nic,
+ struct sk_buff *skb)
+{
+ if (skb->len <= 64)
+ nic->drv_stats.rx_frames_64++;
+ else if (skb->len <= 127)
+ nic->drv_stats.rx_frames_127++;
+ else if (skb->len <= 255)
+ nic->drv_stats.rx_frames_255++;
+ else if (skb->len <= 511)
+ nic->drv_stats.rx_frames_511++;
+ else if (skb->len <= 1023)
+ nic->drv_stats.rx_frames_1023++;
+ else if (skb->len <= 1518)
+ nic->drv_stats.rx_frames_1518++;
+ else
+ nic->drv_stats.rx_frames_jumbo++;
+}
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operations, which in this case are redundant and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
+{
+ writeq_relaxed(val, nic->reg_base + offset);
+}
+
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
+{
+ return readq_relaxed(nic->reg_base + offset);
+}
+
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+ u64 qidx, u64 val)
+{
+ void __iomem *addr = nic->reg_base + offset;
+
+ writeq_relaxed(val, addr + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
+{
+ void __iomem *addr = nic->reg_base + offset;
+
+ return readq_relaxed(addr + (qidx << NIC_Q_NUM_SHIFT));
+}
+
+/* VF -> PF mailbox communication */
+
+static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
+{
+ u64 *msg = (u64 *)mbx;
+
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
+ nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
+}
+
+int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
+{
+ int timeout = NIC_MBOX_MSG_TIMEOUT;
+ int sleep = 10;
+
+ nic->pf_acked = false;
+ nic->pf_nacked = false;
+
+ nicvf_write_to_mbx(nic, mbx);
+
+	/* Wait for the message to be acked, timeout 2sec */
+ while (!nic->pf_acked) {
+ if (nic->pf_nacked)
+ return -EINVAL;
+ msleep(sleep);
+ if (nic->pf_acked)
+ break;
+ timeout -= sleep;
+ if (!timeout) {
+ netdev_err(nic->netdev,
+ "PF didn't ack to mbox msg %d from VF%d\n",
+ (mbx->msg.msg & 0xFF), nic->vf_id);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
+/* Checks if VF is able to communicate with PF
+ * and also gets the VNIC number this VF is associated to.
+ */
+static int nicvf_check_pf_ready(struct nicvf *nic)
+{
+ int timeout = 5000, sleep = 20;
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_READY;
+
+ nic->pf_ready_to_rcv_msg = false;
+
+ nicvf_write_to_mbx(nic, &mbx);
+
+ while (!nic->pf_ready_to_rcv_msg) {
+ msleep(sleep);
+ if (nic->pf_ready_to_rcv_msg)
+ break;
+ timeout -= sleep;
+ if (!timeout) {
+ netdev_err(nic->netdev,
+ "PF didn't respond to READY msg\n");
+ return 0;
+ }
+ }
+ return 1;
+}
+
+static void nicvf_read_bgx_stats(struct nicvf *nic, struct bgx_stats_msg *bgx)
+{
+ if (bgx->rx)
+ nic->bgx_stats.rx_stats[bgx->idx] = bgx->stats;
+ else
+ nic->bgx_stats.tx_stats[bgx->idx] = bgx->stats;
+}
+
+static void nicvf_handle_mbx_intr(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ u64 *mbx_data;
+ u64 mbx_addr;
+ int i;
+
+ mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+ mbx_data = (u64 *)&mbx;
+
+ for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+ *mbx_data = nicvf_reg_read(nic, mbx_addr);
+ mbx_data++;
+ mbx_addr += sizeof(u64);
+ }
+
+ netdev_dbg(nic->netdev, "Mbox message: msg: 0x%x\n", mbx.msg.msg);
+ switch (mbx.msg.msg) {
+ case NIC_MBOX_MSG_READY:
+ nic->pf_ready_to_rcv_msg = true;
+ nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
+ nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
+ nic->node = mbx.nic_cfg.node_id;
+ ether_addr_copy(nic->netdev->dev_addr, mbx.nic_cfg.mac_addr);
+ nic->link_up = false;
+ nic->duplex = 0;
+ nic->speed = 0;
+ break;
+ case NIC_MBOX_MSG_ACK:
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_NACK:
+ nic->pf_nacked = true;
+ break;
+ case NIC_MBOX_MSG_RSS_SIZE:
+ nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
+ nic->pf_acked = true;
+ break;
+ case NIC_MBOX_MSG_BGX_STATS:
+ nicvf_read_bgx_stats(nic, &mbx.bgx_stats);
+ nic->pf_acked = true;
+ nic->bgx_stats_acked = true;
+ break;
+ case NIC_MBOX_MSG_BGX_LINK_CHANGE:
+ nic->pf_acked = true;
+ nic->link_up = mbx.link_status.link_up;
+ nic->duplex = mbx.link_status.duplex;
+ nic->speed = mbx.link_status.speed;
+ if (nic->link_up) {
+ netdev_info(nic->netdev, "%s: Link is Up %d Mbps %s\n",
+ nic->netdev->name, nic->speed,
+ nic->duplex == DUPLEX_FULL ?
+ "Full duplex" : "Half duplex");
+ netif_carrier_on(nic->netdev);
+ netif_tx_wake_all_queues(nic->netdev);
+ } else {
+ netdev_info(nic->netdev, "%s: Link is Down\n",
+ nic->netdev->name);
+ netif_carrier_off(nic->netdev);
+ netif_tx_stop_all_queues(nic->netdev);
+ }
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
+ break;
+ }
+ nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
+}
+
+static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct net_device *netdev)
+{
+ union nic_mbx mbx = {};
+
+ mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
+ mbx.mac.vf_id = nic->vf_id;
+ ether_addr_copy(mbx.mac.mac_addr, netdev->dev_addr);
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_config_cpi(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
+ mbx.cpi_cfg.vf_id = nic->vf_id;
+ mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
+ mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;
+
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_get_rss_size(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+
+ mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
+ mbx.rss_size.vf_id = nic->vf_id;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+void nicvf_config_rss(struct nicvf *nic)
+{
+ union nic_mbx mbx = {};
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int ind_tbl_len = rss->rss_size;
+ int i, nextq = 0;
+
+ mbx.rss_cfg.vf_id = nic->vf_id;
+ mbx.rss_cfg.hash_bits = rss->hash_bits;
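+
+	/* A mailbox message is only 16 bytes, so the indirection table is
+	 * sent in chunks of RSS_IND_TBL_LEN_PER_MBX_MSG entries: the first
+	 * chunk goes as RSS_CFG, the remainder as RSS_CFG_CONT.
+	 */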
+ while (ind_tbl_len) {
+ mbx.rss_cfg.tbl_offset = nextq;
+ mbx.rss_cfg.tbl_len = min(ind_tbl_len,
+ RSS_IND_TBL_LEN_PER_MBX_MSG);
+ mbx.rss_cfg.msg = mbx.rss_cfg.tbl_offset ?
+ NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG;
+
+ for (i = 0; i < mbx.rss_cfg.tbl_len; i++)
+ mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[nextq++];
+
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ ind_tbl_len -= mbx.rss_cfg.tbl_len;
+ }
+}
+
+void nicvf_set_rss_key(struct nicvf *nic)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ u64 key_addr = NIC_VNIC_RSS_KEY_0_4;
+ int idx;
+
+ for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) {
+ nicvf_reg_write(nic, key_addr, rss->key[idx]);
+ key_addr += sizeof(u64);
+ }
+}
+
+static int nicvf_rss_init(struct nicvf *nic)
+{
+ struct nicvf_rss_info *rss = &nic->rss_info;
+ int idx;
+
+ nicvf_get_rss_size(nic);
+
+ if ((nic->qs->rq_cnt <= 1) || (cpi_alg != CPI_ALG_NONE)) {
+ rss->enable = false;
+ rss->hash_bits = 0;
+ return 0;
+ }
+
+ rss->enable = true;
+
+ /* Using the HW reset value for now */
+ rss->key[0] = 0xFEED0BADFEED0BADULL;
+ rss->key[1] = 0xFEED0BADFEED0BADULL;
+ rss->key[2] = 0xFEED0BADFEED0BADULL;
+ rss->key[3] = 0xFEED0BADFEED0BADULL;
+ rss->key[4] = 0xFEED0BADFEED0BADULL;
+
+ nicvf_set_rss_key(nic);
+
+ rss->cfg = RSS_IP_HASH_ENA | RSS_TCP_HASH_ENA | RSS_UDP_HASH_ENA;
+ nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss->cfg);
+
+ rss->hash_bits = ilog2(rounddown_pow_of_two(rss->rss_size));
+
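+	/* Default spread: table entry i maps to RQ (i % rq_cnt), e.g. with
+	 * four RQs the table cycles through RQ0,RQ1,RQ2,RQ3,RQ0,...
+	 */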
+ for (idx = 0; idx < rss->rss_size; idx++)
+ rss->ind_tbl[idx] = ethtool_rxfh_indir_default(idx,
+ nic->qs->rq_cnt);
+ nicvf_config_rss(nic);
+ return 1;
+}
+
+int nicvf_set_real_num_queues(struct net_device *netdev,
+ int tx_queues, int rx_queues)
+{
+ int err = 0;
+
+ err = netif_set_real_num_tx_queues(netdev, tx_queues);
+ if (err) {
+ netdev_err(netdev,
+			   "Failed to set number of Tx queues: %d\n", tx_queues);
+ return err;
+ }
+
+ err = netif_set_real_num_rx_queues(netdev, rx_queues);
+ if (err)
+ netdev_err(netdev,
+			   "Failed to set number of Rx queues: %d\n", rx_queues);
+ return err;
+}
+
+static int nicvf_init_resources(struct nicvf *nic)
+{
+ int err;
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE;
+
+ /* Enable Qset */
+ nicvf_qset_config(nic, true);
+
+ /* Initialize queues and HW for data transfer */
+ err = nicvf_config_data_transfer(nic, true);
+ if (err) {
+ netdev_err(nic->netdev,
+ "Failed to alloc/config VF's QSet resources\n");
+ return err;
+ }
+
+ /* Send VF config done msg to PF */
+ nicvf_write_to_mbx(nic, &mbx);
+
+ return 0;
+}
+
+static void nicvf_snd_pkt_handler(struct net_device *netdev,
+ struct cmp_queue *cq,
+ struct cqe_send_t *cqe_tx, int cqe_type)
+{
+ struct sk_buff *skb = NULL;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct snd_queue *sq;
+ struct sq_hdr_subdesc *hdr;
+
+ sq = &nic->qs->sq[cqe_tx->sq_idx];
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
+ if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
+ return;
+
+ netdev_dbg(nic->netdev,
+ "%s Qset #%d SQ #%d SQ ptr #%d subdesc count %d\n",
+ __func__, cqe_tx->sq_qs, cqe_tx->sq_idx,
+ cqe_tx->sqe_ptr, hdr->subdesc_cnt);
+
+ nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ nicvf_check_cqe_tx_errs(nic, cq, cqe_tx);
+ skb = (struct sk_buff *)sq->skbuff[cqe_tx->sqe_ptr];
+ /* For TSO offloaded packets only one head SKB needs to be freed */
+ if (skb) {
+ prefetch(skb);
+ dev_consume_skb_any(skb);
+ }
+}
+
+static void nicvf_rcv_pkt_handler(struct net_device *netdev,
+ struct napi_struct *napi,
+ struct cmp_queue *cq,
+ struct cqe_rx_t *cqe_rx, int cqe_type)
+{
+ struct sk_buff *skb;
+ struct nicvf *nic = netdev_priv(netdev);
+ int err = 0;
+
+ /* Check for errors */
+ err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
+ if (err && !cqe_rx->rb_cnt)
+ return;
+
+ skb = nicvf_get_rcv_skb(nic, cqe_rx);
+ if (!skb) {
+ netdev_dbg(nic->netdev, "Packet not received\n");
+ return;
+ }
+
+ if (netif_msg_pktdata(nic)) {
+ netdev_info(nic->netdev, "%s: skb 0x%p, len=%d\n", netdev->name,
+ skb, skb->len);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_OFFSET, 16, 1,
+ skb->data, skb->len, true);
+ }
+
+ nicvf_set_rx_frame_cnt(nic, skb);
+
+ skb_record_rx_queue(skb, cqe_rx->rq_idx);
+ if (netdev->hw_features & NETIF_F_RXCSUM) {
+ /* HW by default verifies TCP/UDP/SCTP checksums */
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb_checksum_none_assert(skb);
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ if (napi && (netdev->features & NETIF_F_GRO))
+ napi_gro_receive(napi, skb);
+ else
+ netif_receive_skb(skb);
+}
+
+static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
+ struct napi_struct *napi, int budget)
+{
+ int processed_cqe, work_done = 0;
+ int cqe_count, cqe_head;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct cmp_queue *cq = &qs->cq[cq_idx];
+ struct cqe_rx_t *cq_desc;
+
+ spin_lock_bh(&cq->lock);
+loop:
+ processed_cqe = 0;
+	/* Get the number of valid CQ entries to process */
+ cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_idx);
+ cqe_count &= CQ_CQE_COUNT;
+ if (!cqe_count)
+ goto done;
+
+ /* Get head of the valid CQ entries */
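+	/* (assuming 512-byte CQEs, hence the >> 9 on the head pointer) */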
+ cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
+ cqe_head &= 0xFFFF;
+
+ netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n",
+ __func__, cqe_count, cqe_head);
+ while (processed_cqe < cqe_count) {
+ /* Get the CQ descriptor */
+ cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
+ cqe_head++;
+ cqe_head &= (cq->dmem.q_len - 1);
+ /* Initiate prefetch for next descriptor */
+ prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));
+
+ if ((work_done >= budget) && napi &&
+ (cq_desc->cqe_type != CQE_TYPE_SEND)) {
+ break;
+ }
+
+ netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n",
+ cq_desc->cqe_type);
+ switch (cq_desc->cqe_type) {
+ case CQE_TYPE_RX:
+ nicvf_rcv_pkt_handler(netdev, napi, cq,
+ cq_desc, CQE_TYPE_RX);
+ work_done++;
+ break;
+ case CQE_TYPE_SEND:
+ nicvf_snd_pkt_handler(netdev, cq,
+ (void *)cq_desc, CQE_TYPE_SEND);
+ break;
+ case CQE_TYPE_INVALID:
+ case CQE_TYPE_RX_SPLIT:
+ case CQE_TYPE_RX_TCP:
+ case CQE_TYPE_SEND_PTP:
+ /* Ignore for now */
+ break;
+ }
+ processed_cqe++;
+ }
+ netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n",
+ __func__, processed_cqe, work_done, budget);
+
+ /* Ring doorbell to inform H/W to reuse processed CQEs */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
+ cq_idx, processed_cqe);
+
+ if ((work_done < budget) && napi)
+ goto loop;
+
+done:
+ spin_unlock_bh(&cq->lock);
+ return work_done;
+}
+
+static int nicvf_poll(struct napi_struct *napi, int budget)
+{
+ u64 cq_head;
+ int work_done = 0;
+ struct net_device *netdev = napi->dev;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf_cq_poll *cq;
+ struct netdev_queue *txq;
+
+ cq = container_of(napi, struct nicvf_cq_poll, napi);
+ work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
+
+ txq = netdev_get_tx_queue(netdev, cq->cq_idx);
+ if (netif_tx_queue_stopped(txq))
+ netif_tx_wake_queue(txq);
+
+ if (work_done < budget) {
+ /* Slow packet rate, exit polling */
+ napi_complete(napi);
+ /* Re-enable interrupts */
+ cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
+ cq->cq_idx);
+ nicvf_clear_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_HEAD,
+ cq->cq_idx, cq_head);
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, cq->cq_idx);
+ }
+ return work_done;
+}
+
+/* Qset error interrupt handler
+ *
+ * As of now only CQ errors are handled
+ */
+static void nicvf_handle_qs_err(unsigned long data)
+{
+ struct nicvf *nic = (struct nicvf *)data;
+ struct queue_set *qs = nic->qs;
+ int qidx;
+ u64 status;
+
+ netif_tx_disable(nic->netdev);
+
+ /* Check if it is CQ err */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
+ qidx);
+ if (!(status & CQ_ERR_MASK))
+ continue;
+ /* Process already queued CQEs and reconfig CQ */
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ nicvf_sq_disable(nic, qidx);
+ nicvf_cq_intr_handler(nic->netdev, qidx, NULL, 0);
+ nicvf_cmp_queue_config(nic, qs, qidx, true);
+ nicvf_sq_free_used_descs(nic->netdev, &qs->sq[qidx], qidx);
+ nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
+
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+ }
+
+ netif_tx_start_all_queues(nic->netdev);
+ /* Re-enable Qset error interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+}
+
+static irqreturn_t nicvf_misc_intr_handler(int irq, void *nicvf_irq)
+{
+ struct nicvf *nic = (struct nicvf *)nicvf_irq;
+ u64 intr;
+
+ intr = nicvf_reg_read(nic, NIC_VF_INT);
+ /* Check for spurious interrupt */
+ if (!(intr & NICVF_INTR_MBOX_MASK))
+ return IRQ_HANDLED;
+
+ nicvf_handle_mbx_intr(nic);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nicvf_intr_handler(int irq, void *nicvf_irq)
+{
+ u64 qidx, intr, clear_intr = 0;
+ u64 cq_intr, rbdr_intr, qs_err_intr;
+ struct nicvf *nic = (struct nicvf *)nicvf_irq;
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+
+ intr = nicvf_reg_read(nic, NIC_VF_INT);
+ if (netif_msg_intr(nic))
+ netdev_info(nic->netdev, "%s: interrupt status 0x%llx\n",
+ nic->netdev->name, intr);
+
+ qs_err_intr = intr & NICVF_INTR_QS_ERR_MASK;
+ if (qs_err_intr) {
+ /* Disable Qset err interrupt and schedule softirq */
+ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+ tasklet_hi_schedule(&nic->qs_err_task);
+ clear_intr |= qs_err_intr;
+ }
+
+ /* Disable interrupts and start polling */
+ cq_intr = (intr & NICVF_INTR_CQ_MASK) >> NICVF_INTR_CQ_SHIFT;
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ if (!(cq_intr & (1 << qidx)))
+ continue;
+ if (!nicvf_is_intr_enabled(nic, NICVF_INTR_CQ, qidx))
+ continue;
+
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ clear_intr |= ((1 << qidx) << NICVF_INTR_CQ_SHIFT);
+
+ cq_poll = nic->napi[qidx];
+ /* Schedule NAPI */
+ if (cq_poll)
+ napi_schedule(&cq_poll->napi);
+ }
+
+ /* Handle RBDR interrupts */
+ rbdr_intr = (intr & NICVF_INTR_RBDR_MASK) >> NICVF_INTR_RBDR_SHIFT;
+ if (rbdr_intr) {
+ /* Disable RBDR interrupt and schedule softirq */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ if (!nicvf_is_intr_enabled(nic, NICVF_INTR_RBDR, qidx))
+ continue;
+ nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+ tasklet_hi_schedule(&nic->rbdr_task);
+ clear_intr |= ((1 << qidx) << NICVF_INTR_RBDR_SHIFT);
+ }
+ }
+
+ /* Clear interrupts */
+ nicvf_reg_write(nic, NIC_VF_INT, clear_intr);
+ return IRQ_HANDLED;
+}
+
+static int nicvf_enable_msix(struct nicvf *nic)
+{
+ int ret, vec;
+
+ nic->num_vec = NIC_VF_MSIX_VECTORS;
+
+ for (vec = 0; vec < nic->num_vec; vec++)
+ nic->msix_entries[vec].entry = vec;
+
+ ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
+ if (ret) {
+ netdev_err(nic->netdev,
+ "Req for #%d msix vectors failed\n", nic->num_vec);
+ return 0;
+ }
+ nic->msix_enabled = 1;
+ return 1;
+}
+
+static void nicvf_disable_msix(struct nicvf *nic)
+{
+ if (nic->msix_enabled) {
+ pci_disable_msix(nic->pdev);
+ nic->msix_enabled = 0;
+ nic->num_vec = 0;
+ }
+}
+
+static int nicvf_register_interrupts(struct nicvf *nic)
+{
+ int irq, free, ret = 0;
+ int vector;
+
+ for_each_cq_irq(irq)
+ sprintf(nic->irq_name[irq], "NICVF%d CQ%d",
+ nic->vf_id, irq);
+
+ for_each_sq_irq(irq)
+ sprintf(nic->irq_name[irq], "NICVF%d SQ%d",
+ nic->vf_id, irq - NICVF_INTR_ID_SQ);
+
+ for_each_rbdr_irq(irq)
+ sprintf(nic->irq_name[irq], "NICVF%d RBDR%d",
+ nic->vf_id, irq - NICVF_INTR_ID_RBDR);
+
+ /* Register all interrupts except mailbox */
+ for (irq = 0; irq < NICVF_INTR_ID_SQ; irq++) {
+ vector = nic->msix_entries[irq].vector;
+ ret = request_irq(vector, nicvf_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (ret)
+ break;
+ nic->irq_allocated[irq] = true;
+ }
+
+ for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_MISC; irq++) {
+ vector = nic->msix_entries[irq].vector;
+ ret = request_irq(vector, nicvf_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (ret)
+ break;
+ nic->irq_allocated[irq] = true;
+ }
+
+ sprintf(nic->irq_name[NICVF_INTR_ID_QS_ERR],
+ "NICVF%d Qset error", nic->vf_id);
+ if (!ret) {
+ vector = nic->msix_entries[NICVF_INTR_ID_QS_ERR].vector;
+ irq = NICVF_INTR_ID_QS_ERR;
+ ret = request_irq(vector, nicvf_intr_handler,
+ 0, nic->irq_name[irq], nic);
+ if (!ret)
+ nic->irq_allocated[irq] = true;
+ }
+
+ if (ret) {
+ netdev_err(nic->netdev, "Request irq failed\n");
+ for (free = 0; free < irq; free++) {
+ if (!nic->irq_allocated[free])
+ continue;
+ free_irq(nic->msix_entries[free].vector, nic);
+ nic->irq_allocated[free] = false;
+ }
+ return ret;
+ }
+
+ return 0;
+}
+
+static void nicvf_unregister_interrupts(struct nicvf *nic)
+{
+ int irq;
+
+ /* Free registered interrupts */
+ for (irq = 0; irq < nic->num_vec; irq++) {
+ if (nic->irq_allocated[irq])
+ free_irq(nic->msix_entries[irq].vector, nic);
+ nic->irq_allocated[irq] = false;
+ }
+
+ /* Disable MSI-X */
+ nicvf_disable_msix(nic);
+}
+
+/* Initialize MSI-X vectors and register MISC interrupt.
+ * Send READY message to PF to check if it is alive
+ */
+static int nicvf_register_misc_interrupt(struct nicvf *nic)
+{
+ int ret = 0;
+ int irq = NICVF_INTR_ID_MISC;
+
+ /* Return if mailbox interrupt is already registered */
+ if (nic->msix_enabled)
+ return 0;
+
+ /* Enable MSI-X */
+ if (!nicvf_enable_msix(nic))
+ return 1;
+
+ sprintf(nic->irq_name[irq], "%s Mbox", "NICVF");
+ /* Register Misc interrupt */
+ ret = request_irq(nic->msix_entries[irq].vector,
+ nicvf_misc_intr_handler, 0, nic->irq_name[irq], nic);
+
+ if (ret)
+ return ret;
+ nic->irq_allocated[irq] = true;
+
+ /* Enable mailbox interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_MBOX, 0);
+
+ /* Check if VF is able to communicate with PF */
+ if (!nicvf_check_pf_ready(nic)) {
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+ nicvf_unregister_interrupts(nic);
+ return 1;
+ }
+
+ return 0;
+}
+
+static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ int qid = skb_get_queue_mapping(skb);
+ struct netdev_queue *txq = netdev_get_tx_queue(netdev, qid);
+
+ /* Check for minimum packet length */
+ if (skb->len <= ETH_HLEN) {
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
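+ /* nicvf_sq_append_skb() returns zero when the SQ is out of free
+ * descriptors; stop the queue and return NETDEV_TX_BUSY so the
+ * stack requeues the skb until completions free descriptors again.
+ */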
+ if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
+ nic->drv_stats.tx_busy++;
+ if (netif_msg_tx_err(nic))
+ netdev_warn(netdev,
+ "%s: Transmit ring full, stopping SQ%d\n",
+ netdev->name, qid);
+
+ return NETDEV_TX_BUSY;
+ }
+
+ return NETDEV_TX_OK;
+}
+
+int nicvf_stop(struct net_device *netdev)
+{
+ int irq, qidx;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+ union nic_mbx mbx = {};
+
+ mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ netif_carrier_off(netdev);
+ netif_tx_disable(netdev);
+
+ /* Disable RBDR & QS error interrupts */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ nicvf_disable_intr(nic, NICVF_INTR_RBDR, qidx);
+ nicvf_clear_intr(nic, NICVF_INTR_RBDR, qidx);
+ }
+ nicvf_disable_intr(nic, NICVF_INTR_QS_ERR, 0);
+ nicvf_clear_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+ /* Wait for pending IRQ handlers to finish */
+ for (irq = 0; irq < nic->num_vec; irq++)
+ synchronize_irq(nic->msix_entries[irq].vector);
+
+ tasklet_kill(&nic->rbdr_task);
+ tasklet_kill(&nic->qs_err_task);
+ if (nic->rb_work_scheduled)
+ cancel_delayed_work_sync(&nic->rbdr_work);
+
+ for (qidx = 0; qidx < nic->qs->cq_cnt; qidx++) {
+ cq_poll = nic->napi[qidx];
+ if (!cq_poll)
+ continue;
+ nic->napi[qidx] = NULL;
+ napi_synchronize(&cq_poll->napi);
+ /* The CQ interrupt is re-enabled as part of napi_complete(),
+ * so disable it again now
+ */
+ nicvf_disable_intr(nic, NICVF_INTR_CQ, qidx);
+ nicvf_clear_intr(nic, NICVF_INTR_CQ, qidx);
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ kfree(cq_poll);
+ }
+
+ /* Free resources */
+ nicvf_config_data_transfer(nic, false);
+
+ /* Disable HW Qset */
+ nicvf_qset_config(nic, false);
+
+ /* disable mailbox interrupt */
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+
+ nicvf_unregister_interrupts(nic);
+
+ return 0;
+}
+
+int nicvf_open(struct net_device *netdev)
+{
+ int err, qidx;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ struct nicvf_cq_poll *cq_poll = NULL;
+
+ nic->mtu = netdev->mtu;
+
+ netif_carrier_off(netdev);
+
+ err = nicvf_register_misc_interrupt(nic);
+ if (err)
+ return err;
+
+ /* Register NAPI handler for processing CQEs */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ cq_poll = kzalloc(sizeof(*cq_poll), GFP_KERNEL);
+ if (!cq_poll) {
+ err = -ENOMEM;
+ goto napi_del;
+ }
+ cq_poll->cq_idx = qidx;
+ netif_napi_add(netdev, &cq_poll->napi, nicvf_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&cq_poll->napi);
+ nic->napi[qidx] = cq_poll;
+ }
+
+ /* Check if we got a MAC address from the PF, else generate a random MAC */
+ if (is_zero_ether_addr(netdev->dev_addr)) {
+ eth_hw_addr_random(netdev);
+ nicvf_hw_set_mac_addr(nic, netdev);
+ }
+
+ /* Init tasklet for handling Qset err interrupt */
+ tasklet_init(&nic->qs_err_task, nicvf_handle_qs_err,
+ (unsigned long)nic);
+
+ /* Init RBDR tasklet which will refill RBDR */
+ tasklet_init(&nic->rbdr_task, nicvf_rbdr_task,
+ (unsigned long)nic);
+ INIT_DELAYED_WORK(&nic->rbdr_work, nicvf_rbdr_work);
+
+ /* Configure CPI algorithm */
+ nic->cpi_alg = cpi_alg;
+ nicvf_config_cpi(nic);
+
+ /* Configure receive side scaling */
+ nicvf_rss_init(nic);
+
+ err = nicvf_register_interrupts(nic);
+ if (err)
+ goto cleanup;
+
+ /* Initialize the queues */
+ err = nicvf_init_resources(nic);
+ if (err)
+ goto cleanup;
+
+ /* Make sure queue initialization is written */
+ wmb();
+
+ nicvf_reg_write(nic, NIC_VF_INT, -1);
+ /* Enable Qset err interrupt */
+ nicvf_enable_intr(nic, NICVF_INTR_QS_ERR, 0);
+
+ /* Enable completion queue interrupt */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_enable_intr(nic, NICVF_INTR_CQ, qidx);
+
+ /* Enable RBDR threshold interrupt */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
+
+ netif_carrier_on(netdev);
+ netif_tx_start_all_queues(netdev);
+
+ return 0;
+cleanup:
+ nicvf_disable_intr(nic, NICVF_INTR_MBOX, 0);
+ nicvf_unregister_interrupts(nic);
+napi_del:
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ cq_poll = nic->napi[qidx];
+ if (!cq_poll)
+ continue;
+ napi_disable(&cq_poll->napi);
+ netif_napi_del(&cq_poll->napi);
+ kfree(cq_poll);
+ nic->napi[qidx] = NULL;
+ }
+ return err;
+}
+
+static int nicvf_update_hw_max_frs(struct nicvf *nic, int mtu)
+{
+ union nic_mbx mbx = {};
+
+ mbx.frs.msg = NIC_MBOX_MSG_SET_MAX_FRS;
+ mbx.frs.max_frs = mtu;
+ mbx.frs.vf_id = nic->vf_id;
+
+ return nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static int nicvf_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (new_mtu > NIC_HW_MAX_FRS)
+ return -EINVAL;
+
+ if (new_mtu < NIC_HW_MIN_FRS)
+ return -EINVAL;
+
+ if (nicvf_update_hw_max_frs(nic, new_mtu))
+ return -EINVAL;
+ netdev->mtu = new_mtu;
+ nic->mtu = new_mtu;
+
+ return 0;
+}
+
+static int nicvf_set_mac_address(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+ struct nicvf *nic = netdev_priv(netdev);
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+
+ if (nic->msix_enabled)
+ if (nicvf_hw_set_mac_addr(nic, netdev))
+ return -EBUSY;
+
+ return 0;
+}
+
+void nicvf_update_lmac_stats(struct nicvf *nic)
+{
+ int stat = 0;
+ union nic_mbx mbx = {};
+ int timeout;
+
+ if (!netif_running(nic->netdev))
+ return;
+
+ mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
+ mbx.bgx_stats.vf_id = nic->vf_id;
+ /* Rx stats */
+ mbx.bgx_stats.rx = 1;
+ while (stat < BGX_RX_STATS_COUNT) {
+ nic->bgx_stats_acked = 0;
+ mbx.bgx_stats.idx = stat;
+ nicvf_send_msg_to_pf(nic, &mbx);
+ timeout = 0;
+ while ((!nic->bgx_stats_acked) && (timeout < 10)) {
+ msleep(2);
+ timeout++;
+ }
+ stat++;
+ }
+
+ stat = 0;
+
+ /* Tx stats */
+ mbx.bgx_stats.rx = 0;
+ while (stat < BGX_TX_STATS_COUNT) {
+ nic->bgx_stats_acked = 0;
+ mbx.bgx_stats.idx = stat;
+ nicvf_send_msg_to_pf(nic, &mbx);
+ timeout = 0;
+ while ((!nic->bgx_stats_acked) && (timeout < 10)) {
+ msleep(2);
+ timeout++;
+ }
+ stat++;
+ }
+}
+
+void nicvf_update_stats(struct nicvf *nic)
+{
+ int qidx;
+ struct nicvf_hw_stats *stats = &nic->stats;
+ struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+ struct queue_set *qs = nic->qs;
+
+#define GET_RX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3))
+#define GET_TX_STATS(reg) \
+ nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3))
+
+ stats->rx_bytes_ok = GET_RX_STATS(RX_OCTS);
+ stats->rx_ucast_frames_ok = GET_RX_STATS(RX_UCAST);
+ stats->rx_bcast_frames_ok = GET_RX_STATS(RX_BCAST);
+ stats->rx_mcast_frames_ok = GET_RX_STATS(RX_MCAST);
+ stats->rx_fcs_errors = GET_RX_STATS(RX_FCS);
+ stats->rx_l2_errors = GET_RX_STATS(RX_L2ERR);
+ stats->rx_drop_red = GET_RX_STATS(RX_RED);
+ stats->rx_drop_overrun = GET_RX_STATS(RX_ORUN);
+ stats->rx_drop_bcast = GET_RX_STATS(RX_DRP_BCAST);
+ stats->rx_drop_mcast = GET_RX_STATS(RX_DRP_MCAST);
+ stats->rx_drop_l3_bcast = GET_RX_STATS(RX_DRP_L3BCAST);
+ stats->rx_drop_l3_mcast = GET_RX_STATS(RX_DRP_L3MCAST);
+
+ stats->tx_bytes_ok = GET_TX_STATS(TX_OCTS);
+ stats->tx_ucast_frames_ok = GET_TX_STATS(TX_UCAST);
+ stats->tx_bcast_frames_ok = GET_TX_STATS(TX_BCAST);
+ stats->tx_mcast_frames_ok = GET_TX_STATS(TX_MCAST);
+ stats->tx_drops = GET_TX_STATS(TX_DROP);
+
+ drv_stats->rx_frames_ok = stats->rx_ucast_frames_ok +
+ stats->rx_bcast_frames_ok +
+ stats->rx_mcast_frames_ok;
+ drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
+ stats->tx_bcast_frames_ok +
+ stats->tx_mcast_frames_ok;
+ drv_stats->rx_drops = stats->rx_drop_red +
+ stats->rx_drop_overrun;
+ drv_stats->tx_drops = stats->tx_drops;
+
+ /* Update RQ and SQ stats */
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_update_rq_stats(nic, qidx);
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_update_sq_stats(nic, qidx);
+}
+
+static struct rtnl_link_stats64 *nicvf_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *stats)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct nicvf_hw_stats *hw_stats = &nic->stats;
+ struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
+
+ nicvf_update_stats(nic);
+
+ stats->rx_bytes = hw_stats->rx_bytes_ok;
+ stats->rx_packets = drv_stats->rx_frames_ok;
+ stats->rx_dropped = drv_stats->rx_drops;
+
+ stats->tx_bytes = hw_stats->tx_bytes_ok;
+ stats->tx_packets = drv_stats->tx_frames_ok;
+ stats->tx_dropped = drv_stats->tx_drops;
+
+ return stats;
+}
+
+static void nicvf_tx_timeout(struct net_device *dev)
+{
+ struct nicvf *nic = netdev_priv(dev);
+
+ if (netif_msg_tx_err(nic))
+ netdev_warn(dev, "%s: Transmit timed out, resetting\n",
+ dev->name);
+
+ schedule_work(&nic->reset_task);
+}
+
+static void nicvf_reset_task(struct work_struct *work)
+{
+ struct nicvf *nic;
+
+ nic = container_of(work, struct nicvf, reset_task);
+
+ if (!netif_running(nic->netdev))
+ return;
+
+ nicvf_stop(nic->netdev);
+ nicvf_open(nic->netdev);
+ nic->netdev->trans_start = jiffies;
+}
+
+static const struct net_device_ops nicvf_netdev_ops = {
+ .ndo_open = nicvf_open,
+ .ndo_stop = nicvf_stop,
+ .ndo_start_xmit = nicvf_xmit,
+ .ndo_change_mtu = nicvf_change_mtu,
+ .ndo_set_mac_address = nicvf_set_mac_address,
+ .ndo_get_stats64 = nicvf_get_stats64,
+ .ndo_tx_timeout = nicvf_tx_timeout,
+};
+
+static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct device *dev = &pdev->dev;
+ struct net_device *netdev;
+ struct nicvf *nic;
+ struct queue_set *qs;
+ int err;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "Unable to get usable DMA configuration\n");
+ goto err_release_regions;
+ }
+
+ err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
+ if (err) {
+ dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n");
+ goto err_release_regions;
+ }
+
+ netdev = alloc_etherdev_mqs(sizeof(struct nicvf),
+ MAX_RCV_QUEUES_PER_QS,
+ MAX_SND_QUEUES_PER_QS);
+ if (!netdev) {
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
+
+ pci_set_drvdata(pdev, netdev);
+
+ SET_NETDEV_DEV(netdev, &pdev->dev);
+
+ nic = netdev_priv(netdev);
+ nic->netdev = netdev;
+ nic->pdev = pdev;
+
+ /* MAP VF's configuration registers */
+ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!nic->reg_base) {
+ dev_err(dev, "Cannot map config register space, aborting\n");
+ err = -ENOMEM;
+ goto err_free_netdev;
+ }
+
+ err = nicvf_set_qset_resources(nic);
+ if (err)
+ goto err_free_netdev;
+
+ qs = nic->qs;
+
+ err = nicvf_set_real_num_queues(netdev, qs->sq_cnt, qs->rq_cnt);
+ if (err)
+ goto err_free_netdev;
+
+ /* Check if PF is alive and get MAC address for this VF */
+ err = nicvf_register_misc_interrupt(nic);
+ if (err)
+ goto err_free_netdev;
+
+ netdev->features |= (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_TSO | NETIF_F_GRO);
+ netdev->hw_features = netdev->features;
+
+ netdev->netdev_ops = &nicvf_netdev_ops;
+
+ INIT_WORK(&nic->reset_task, nicvf_reset_task);
+
+ err = register_netdev(netdev);
+ if (err) {
+ dev_err(dev, "Failed to register netdevice\n");
+ goto err_unregister_interrupts;
+ }
+
+ nic->msg_enable = debug;
+
+ nicvf_set_ethtool_ops(netdev);
+
+ return 0;
+
+err_unregister_interrupts:
+ nicvf_unregister_interrupts(nic);
+err_free_netdev:
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ return err;
+}
+
+static void nicvf_remove(struct pci_dev *pdev)
+{
+ struct net_device *netdev = pci_get_drvdata(pdev);
+ struct nicvf *nic = netdev_priv(netdev);
+
+ unregister_netdev(netdev);
+ nicvf_unregister_interrupts(nic);
+ pci_set_drvdata(pdev, NULL);
+ free_netdev(netdev);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+}
+
+static struct pci_driver nicvf_driver = {
+ .name = DRV_NAME,
+ .id_table = nicvf_id_table,
+ .probe = nicvf_probe,
+ .remove = nicvf_remove,
+};
+
+static int __init nicvf_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&nicvf_driver);
+}
+
+static void __exit nicvf_cleanup_module(void)
+{
+ pci_unregister_driver(&nicvf_driver);
+}
+
+module_init(nicvf_init_module);
+module_exit(nicvf_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
new file mode 100644
index 0000000..d69d228d
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -0,0 +1,1545 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/etherdevice.h>
+#include <net/ip.h>
+#include <net/tso.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "q_struct.h"
+#include "nicvf_queues.h"
+
+struct rbuf_info {
+ struct page *page;
+ void *data;
+ u64 offset;
+};
+
+#define GET_RBUF_INFO(x) ((struct rbuf_info *)(x - NICVF_RCV_BUF_ALIGN_BYTES))
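+
+/* Receive buffer layout (built in nicvf_alloc_rcv_buffer() below):
+ *
+ *   [ pad to 128B ][ struct rbuf_info ... pad ][ data handed to HW ]
+ *
+ * rbuf_info lives in the NICVF_RCV_BUF_ALIGN_BYTES immediately before
+ * the address given to HW, so GET_RBUF_INFO() can recover it from the
+ * buffer address the HW returns in a CQE.
+ */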
+
+/* Poll a register for a specific value */
+static int nicvf_poll_reg(struct nicvf *nic, int qidx,
+ u64 reg, int bit_pos, int bits, int val)
+{
+ u64 bit_mask;
+ u64 reg_val;
+ int timeout = 10;
+
+ bit_mask = (1ULL << bits) - 1;
+ bit_mask = (bit_mask << bit_pos);
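+ /* e.g. bit_pos = 62, bits = 2 gives bit_mask = 0xC000000000000000 */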
+
+ while (timeout) {
+ reg_val = nicvf_queue_reg_read(nic, reg, qidx);
+ if (((reg_val & bit_mask) >> bit_pos) == val)
+ return 0;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ netdev_err(nic->netdev, "Poll on reg 0x%llx failed\n", reg);
+ return 1;
+}
+
+/* Allocate memory for a queue's descriptors */
+static int nicvf_alloc_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem,
+ int q_len, int desc_size, int align_bytes)
+{
+ dmem->q_len = q_len;
+ dmem->size = (desc_size * q_len) + align_bytes;
+ /* Save address, need it while freeing */
+ dmem->unalign_base = dma_zalloc_coherent(&nic->pdev->dev, dmem->size,
+ &dmem->dma, GFP_KERNEL);
+ if (!dmem->unalign_base)
+ return -ENOMEM;
+
+ /* Align memory address for 'align_bytes' */
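+ /* e.g. dma = 0x10040 with align_bytes = 128 yields phys_base =
+ * 0x10080, and the CPU pointer advances by the same 0x40
+ */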
+ dmem->phys_base = NICVF_ALIGNED_ADDR((u64)dmem->dma, align_bytes);
+ dmem->base = dmem->unalign_base + (dmem->phys_base - dmem->dma);
+ return 0;
+}
+
+/* Free queue's descriptor memory */
+static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
+{
+ if (!dmem)
+ return;
+
+ dma_free_coherent(&nic->pdev->dev, dmem->size,
+ dmem->unalign_base, dmem->dma);
+ dmem->unalign_base = NULL;
+ dmem->base = NULL;
+}
+
+/* Allocate a buffer for packet reception.
+ * HW returns the memory address where the packet is DMA'ed, not a
+ * pointer into the RBDR ring, so save the buffer address at the start
+ * of the fragment and align the start address to a cache-line boundary.
+ */
+static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
+ u32 buf_len, u64 **rbuf)
+{
+ u64 data;
+ struct rbuf_info *rinfo;
+ int order = get_order(buf_len);
+
+ /* Check if the request can be accommodated in the previously allocated page */
+ if (nic->rb_page) {
+ if ((nic->rb_page_offset + buf_len + buf_len) >
+ (PAGE_SIZE << order)) {
+ nic->rb_page = NULL;
+ } else {
+ nic->rb_page_offset += buf_len;
+ get_page(nic->rb_page);
+ }
+ }
+
+ /* Allocate a new page */
+ if (!nic->rb_page) {
+ nic->rb_page = alloc_pages(gfp | __GFP_COMP, order);
+ if (!nic->rb_page) {
+ netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n");
+ return -ENOMEM;
+ }
+ nic->rb_page_offset = 0;
+ }
+
+ data = (u64)page_address(nic->rb_page) + nic->rb_page_offset;
+
+ /* Align buffer address to a cache line, i.e., 128 bytes */
+ rinfo = (struct rbuf_info *)(data + NICVF_RCV_BUF_ALIGN_LEN(data));
+ /* Save page address for refcount updates */
+ rinfo->page = nic->rb_page;
+ /* Store start address for later retrieval */
+ rinfo->data = (void *)data;
+ /* Store alignment offset */
+ rinfo->offset = NICVF_RCV_BUF_ALIGN_LEN(data);
+
+ data += rinfo->offset;
+
+ /* Give the next aligned address to HW for DMA */
+ *rbuf = (u64 *)(data + NICVF_RCV_BUF_ALIGN_BYTES);
+ return 0;
+}
+
+/* Retrieve actual buffer start address and build skb for received packet */
+static struct sk_buff *nicvf_rb_ptr_to_skb(struct nicvf *nic,
+ u64 rb_ptr, int len)
+{
+ struct sk_buff *skb;
+ struct rbuf_info *rinfo;
+
+ rb_ptr = (u64)phys_to_virt(rb_ptr);
+ /* Get buffer start address and alignment offset */
+ rinfo = GET_RBUF_INFO(rb_ptr);
+
+ /* Now build an skb to give to stack */
+ skb = build_skb(rinfo->data, RCV_FRAG_LEN);
+ if (!skb) {
+ put_page(rinfo->page);
+ return NULL;
+ }
+
+ /* Set correct skb->data */
+ skb_reserve(skb, rinfo->offset + NICVF_RCV_BUF_ALIGN_BYTES);
+
+ prefetch((void *)rb_ptr);
+ return skb;
+}
+
+/* Allocate RBDR ring and populate receive buffers */
+static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
+ int ring_len, int buf_size)
+{
+ int idx;
+ u64 *rbuf;
+ struct rbdr_entry_t *desc;
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &rbdr->dmem, ring_len,
+ sizeof(struct rbdr_entry_t),
+ NICVF_RCV_BUF_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ rbdr->desc = rbdr->dmem.base;
+ /* Buffer size has to be in multiples of 128 bytes */
+ rbdr->dma_size = buf_size;
+ rbdr->enable = true;
+ rbdr->thresh = RBDR_THRESH;
+
+ nic->rb_page = NULL;
+ for (idx = 0; idx < ring_len; idx++) {
+ err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
+ &rbuf);
+ if (err)
+ return err;
+
+ desc = GET_RBDR_DESC(rbdr, idx);
+ desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ }
+ return 0;
+}
+
+/* Free RBDR ring and its receive buffers */
+static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
+{
+ int head, tail;
+ u64 buf_addr;
+ struct rbdr_entry_t *desc;
+ struct rbuf_info *rinfo;
+
+ if (!rbdr)
+ return;
+
+ rbdr->enable = false;
+ if (!rbdr->dmem.base)
+ return;
+
+ head = rbdr->head;
+ tail = rbdr->tail;
+
+ /* Free SKBs */
+ while (head != tail) {
+ desc = GET_RBDR_DESC(rbdr, head);
+ buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
+ rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
+ put_page(rinfo->page);
+ head++;
+ head &= (rbdr->dmem.q_len - 1);
+ }
+ /* Free SKB of tail desc */
+ desc = GET_RBDR_DESC(rbdr, tail);
+ buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
+ rinfo = GET_RBUF_INFO((u64)phys_to_virt(buf_addr));
+ put_page(rinfo->page);
+
+ /* Free RBDR ring */
+ nicvf_free_q_desc_mem(nic, &rbdr->dmem);
+}
+
+/* Refill receive buffer descriptors with new buffers */
+static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
+{
+ struct queue_set *qs = nic->qs;
+ int rbdr_idx = qs->rbdr_cnt;
+ int tail, qcount;
+ int refill_rb_cnt;
+ struct rbdr *rbdr;
+ struct rbdr_entry_t *desc;
+ u64 *rbuf;
+ int new_rb = 0;
+
+refill:
+ if (!rbdr_idx)
+ return;
+ rbdr_idx--;
+ rbdr = &qs->rbdr[rbdr_idx];
+ /* Check if it's enabled */
+ if (!rbdr->enable)
+ goto next_rbdr;
+
+ /* Get the number of descriptors to be refilled */
+ qcount = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, rbdr_idx);
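+ /* The low 19 bits of RBDR STATUS0 hold the count of buffers
+ * currently queued (per the mask used here)
+ */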
+ qcount &= 0x7FFFF;
+ /* The doorbell can be rung with at most ring size minus 1 */
+ if (qcount >= (qs->rbdr_len - 1))
+ goto next_rbdr;
+ else
+ refill_rb_cnt = qs->rbdr_len - qcount - 1;
+
+ /* Start filling descs from tail */
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, rbdr_idx) >> 3;
+ while (refill_rb_cnt) {
+ tail++;
+ tail &= (rbdr->dmem.q_len - 1);
+
+ if (nicvf_alloc_rcv_buffer(nic, gfp, RCV_FRAG_LEN, &rbuf))
+ break;
+
+ desc = GET_RBDR_DESC(rbdr, tail);
+ desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+ refill_rb_cnt--;
+ new_rb++;
+ }
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Check if buffer allocation failed */
+ if (refill_rb_cnt)
+ nic->rb_alloc_fail = true;
+ else
+ nic->rb_alloc_fail = false;
+
+ /* Notify HW */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+ rbdr_idx, new_rb);
+next_rbdr:
+ /* Re-enable RBDR interrupts only if buffer allocation is success */
+ if (!nic->rb_alloc_fail && rbdr->enable)
+ nicvf_enable_intr(nic, NICVF_INTR_RBDR, rbdr_idx);
+
+ if (rbdr_idx)
+ goto refill;
+}
+
+/* Allocate receive buffers in non-atomic mode for a better chance of success */
+void nicvf_rbdr_work(struct work_struct *work)
+{
+ struct nicvf *nic = container_of(work, struct nicvf, rbdr_work.work);
+
+ nicvf_refill_rbdr(nic, GFP_KERNEL);
+ if (nic->rb_alloc_fail)
+ schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+ else
+ nic->rb_work_scheduled = false;
+}
+
+/* In softirq context, allocate receive buffers in atomic mode */
+void nicvf_rbdr_task(unsigned long data)
+{
+ struct nicvf *nic = (struct nicvf *)data;
+
+ nicvf_refill_rbdr(nic, GFP_ATOMIC);
+ if (nic->rb_alloc_fail) {
+ nic->rb_work_scheduled = true;
+ schedule_delayed_work(&nic->rbdr_work, msecs_to_jiffies(10));
+ }
+}
+
+/* Initialize completion queue */
+static int nicvf_init_cmp_queue(struct nicvf *nic,
+ struct cmp_queue *cq, int q_len)
+{
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &cq->dmem, q_len, CMP_QUEUE_DESC_SIZE,
+ NICVF_CQ_BASE_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ cq->desc = cq->dmem.base;
+ cq->thresh = CMP_QUEUE_CQE_THRESH;
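+ /* CMP_QUEUE_TIMER_THRESH appears to count in 50ns (0.05 us) ticks;
+ * the expression below constant-folds to (220 * 0.05) - 1 = 10,
+ * matching the "10usec" note where the threshold is defined
+ */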
+ nic->cq_coalesce_usecs = (CMP_QUEUE_TIMER_THRESH * 0.05) - 1;
+
+ return 0;
+}
+
+static void nicvf_free_cmp_queue(struct nicvf *nic, struct cmp_queue *cq)
+{
+ if (!cq)
+ return;
+ if (!cq->dmem.base)
+ return;
+
+ nicvf_free_q_desc_mem(nic, &cq->dmem);
+}
+
+/* Initialize transmit queue */
+static int nicvf_init_snd_queue(struct nicvf *nic,
+ struct snd_queue *sq, int q_len)
+{
+ int err;
+
+ err = nicvf_alloc_q_desc_mem(nic, &sq->dmem, q_len, SND_QUEUE_DESC_SIZE,
+ NICVF_SQ_BASE_ALIGN_BYTES);
+ if (err)
+ return err;
+
+ sq->desc = sq->dmem.base;
+ sq->skbuff = kcalloc(q_len, sizeof(u64), GFP_KERNEL);
+ if (!sq->skbuff)
+ return -ENOMEM;
+ sq->head = 0;
+ sq->tail = 0;
+ atomic_set(&sq->free_cnt, q_len - 1);
+ sq->thresh = SND_QUEUE_THRESH;
+
+ /* Preallocate memory for TSO segment's header */
+ sq->tso_hdrs = dma_alloc_coherent(&nic->pdev->dev,
+ q_len * TSO_HEADER_SIZE,
+ &sq->tso_hdrs_phys, GFP_KERNEL);
+ if (!sq->tso_hdrs)
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
+{
+ if (!sq)
+ return;
+ if (!sq->dmem.base)
+ return;
+
+ if (sq->tso_hdrs)
+ dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len * TSO_HEADER_SIZE,
+ sq->tso_hdrs, sq->tso_hdrs_phys);
+
+ kfree(sq->skbuff);
+ nicvf_free_q_desc_mem(nic, &sq->dmem);
+}
+
+static void nicvf_reclaim_snd_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ /* Disable send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0);
+ /* Check if SQ is stopped */
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, 21, 1, 0x01))
+ return;
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+}
+
+static void nicvf_reclaim_rcv_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ union nic_mbx mbx = {};
+
+ /* Make sure all packets in the pipeline are written back into mem */
+ mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC;
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_reclaim_cmp_queue(struct nicvf *nic,
+ struct queue_set *qs, int qidx)
+{
+ /* Disable timer threshold (doesn't get reset upon CQ reset) */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0);
+ /* Disable completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0);
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+}
+
+static void nicvf_reclaim_rbdr(struct nicvf *nic,
+ struct rbdr *rbdr, int qidx)
+{
+ u64 tmp, fifo_state;
+ int timeout = 10;
+
+ /* Save head and tail pointers for freeing up buffers */
+ rbdr->head = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_HEAD,
+ qidx) >> 3;
+ rbdr->tail = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_TAIL,
+ qidx) >> 3;
+
+ /* If RBDR FIFO is in 'FAIL' state then do a reset first
+ * before reclaiming.
+ */
+ fifo_state = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx);
+ if (((fifo_state >> 62) & 0x03) == 0x3)
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, NICVF_RBDR_RESET);
+
+ /* Disable RBDR */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0);
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return;
+ while (1) {
+ tmp = nicvf_queue_reg_read(nic,
+ NIC_QSET_RBDR_0_1_PREFETCH_STATUS,
+ qidx);
+ if ((tmp & 0xFFFFFFFF) == ((tmp >> 32) & 0xFFFFFFFF))
+ break;
+ usleep_range(1000, 2000);
+ timeout--;
+ if (!timeout) {
+ netdev_err(nic->netdev,
+ "Failed polling on prefetch status\n");
+ return;
+ }
+ }
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, NICVF_RBDR_RESET);
+
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02))
+ return;
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00);
+ if (nicvf_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00))
+ return;
+}
+
+/* Configures receive queue */
+static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct rcv_queue *rq;
+ struct rq_cfg rq_cfg;
+
+ rq = &qs->rq[qidx];
+ rq->enable = enable;
+
+ /* Disable receive queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0);
+
+ if (!rq->enable) {
+ nicvf_reclaim_rcv_queue(nic, qs, qidx);
+ return;
+ }
+
+ rq->cq_qs = qs->vnic_id;
+ rq->cq_idx = qidx;
+ rq->start_rbdr_qs = qs->vnic_id;
+ rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1;
+ rq->cont_rbdr_qs = qs->vnic_id;
+ rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1;
+ /* All writes of RBDR data to be loaded into L2 cache as well */
+ rq->caching = 1;
+
+ /* Send a mailbox msg to PF to config RQ */
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_CFG;
+ mbx.rq.qs_num = qs->vnic_id;
+ mbx.rq.rq_num = qidx;
+ mbx.rq.cfg = (rq->caching << 26) | (rq->cq_qs << 19) |
+ (rq->cq_idx << 16) | (rq->cont_rbdr_qs << 9) |
+ (rq->cont_qs_rbdr_idx << 8) |
+ (rq->start_rbdr_qs << 1) | (rq->start_qs_rbdr_idx);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_BP_CFG;
+ mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (qs->vnic_id << 0);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ /* RQ drop config
+ * Enable CQ drop to reserve sufficient CQEs for all tx packets
+ */
+ mbx.rq.msg = NIC_MBOX_MSG_RQ_DROP_CFG;
+ mbx.rq.cfg = (1ULL << 62) | (RQ_CQ_DROP << 8);
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, qidx, 0x00);
+
+ /* Enable Receive queue */
+ rq_cfg.ena = 1;
+ rq_cfg.tcp_ena = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, *(u64 *)&rq_cfg);
+}
+
+/* Configures completion queue */
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ struct cmp_queue *cq;
+ struct cq_cfg cq_cfg;
+
+ cq = &qs->cq[qidx];
+ cq->enable = enable;
+
+ if (!cq->enable) {
+ nicvf_reclaim_cmp_queue(nic, qs, qidx);
+ return;
+ }
+
+ /* Reset completion queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET);
+
+ spin_lock_init(&cq->lock);
+ /* Set completion queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE,
+ qidx, (u64)(cq->dmem.phys_base));
+
+ /* Enable Completion queue */
+ cq_cfg.ena = 1;
+ cq_cfg.reset = 0;
+ cq_cfg.caching = 0;
+ cq_cfg.qsize = CMP_QSIZE;
+ cq_cfg.avg_con = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, cq->thresh);
+ nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2,
+ qidx, nic->cq_coalesce_usecs);
+}
+
+/* Configures transmit queue */
+static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct snd_queue *sq;
+ struct sq_cfg sq_cfg;
+
+ sq = &qs->sq[qidx];
+ sq->enable = enable;
+
+ if (!sq->enable) {
+ nicvf_reclaim_snd_queue(nic, qs, qidx);
+ return;
+ }
+
+ /* Reset send queue */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET);
+
+ sq->cq_qs = qs->vnic_id;
+ sq->cq_idx = qidx;
+
+ /* Send a mailbox msg to PF to config SQ */
+ mbx.sq.msg = NIC_MBOX_MSG_SQ_CFG;
+ mbx.sq.qs_num = qs->vnic_id;
+ mbx.sq.sq_num = qidx;
+ mbx.sq.cfg = (sq->cq_qs << 3) | sq->cq_idx;
+ nicvf_send_msg_to_pf(nic, &mbx);
+
+ /* Set queue base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE,
+ qidx, (u64)(sq->dmem.phys_base));
+
+ /* Enable send queue & set queue size */
+ sq_cfg.ena = 1;
+ sq_cfg.reset = 0;
+ sq_cfg.ldwb = 0;
+ sq_cfg.qsize = SND_QSIZE;
+ sq_cfg.tstmp_bgx_intf = 0;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_THRESH, qidx, sq->thresh);
+
+ /* Set queue:cpu affinity for better load distribution */
+ if (cpu_online(qidx)) {
+ cpumask_set_cpu(qidx, &sq->affinity_mask);
+ netif_set_xps_queue(nic->netdev,
+ &sq->affinity_mask, qidx);
+ }
+}
+
+/* Configures receive buffer descriptor ring */
+static void nicvf_rbdr_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable)
+{
+ struct rbdr *rbdr;
+ struct rbdr_cfg rbdr_cfg;
+
+ rbdr = &qs->rbdr[qidx];
+ nicvf_reclaim_rbdr(nic, rbdr, qidx);
+ if (!enable)
+ return;
+
+ /* Set descriptor base address */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE,
+ qidx, (u64)(rbdr->dmem.phys_base));
+
+ /* Enable RBDR & set queue size */
+ /* Buffer size should be in multiples of 128 bytes */
+ rbdr_cfg.ena = 1;
+ rbdr_cfg.reset = 0;
+ rbdr_cfg.ldwb = 0;
+ rbdr_cfg.qsize = RBDR_SIZE;
+ rbdr_cfg.avg_con = 0;
+ rbdr_cfg.lines = rbdr->dma_size / 128;
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG,
+ qidx, *(u64 *)&rbdr_cfg);
+
+ /* Notify HW */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR,
+ qidx, qs->rbdr_len - 1);
+
+ /* Set threshold value for interrupt generation */
+ nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_THRESH,
+ qidx, rbdr->thresh - 1);
+}
+
+/* Requests PF to assign and enable Qset */
+void nicvf_qset_config(struct nicvf *nic, bool enable)
+{
+ union nic_mbx mbx = {};
+ struct queue_set *qs = nic->qs;
+ struct qs_cfg *qs_cfg;
+
+ if (!qs) {
+ netdev_warn(nic->netdev,
+ "Qset is still not allocated, don't init queues\n");
+ return;
+ }
+
+ qs->enable = enable;
+ qs->vnic_id = nic->vf_id;
+
+ /* Send a mailbox msg to PF to config Qset */
+ mbx.qs.msg = NIC_MBOX_MSG_QS_CFG;
+ mbx.qs.num = qs->vnic_id;
+
+ mbx.qs.cfg = 0;
+ qs_cfg = (struct qs_cfg *)&mbx.qs.cfg;
+ if (qs->enable) {
+ qs_cfg->ena = 1;
+#ifdef __BIG_ENDIAN
+ qs_cfg->be = 1;
+#endif
+ qs_cfg->vnic = qs->vnic_id;
+ }
+ nicvf_send_msg_to_pf(nic, &mbx);
+}
+
+static void nicvf_free_resources(struct nicvf *nic)
+{
+ int qidx;
+ struct queue_set *qs = nic->qs;
+
+ /* Free receive buffer descriptor ring */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_free_rbdr(nic, &qs->rbdr[qidx]);
+
+ /* Free completion queue */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_free_cmp_queue(nic, &qs->cq[qidx]);
+
+ /* Free send queue */
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_free_snd_queue(nic, &qs->sq[qidx]);
+}
+
+static int nicvf_alloc_resources(struct nicvf *nic)
+{
+ int qidx;
+ struct queue_set *qs = nic->qs;
+
+ /* Alloc receive buffer descriptor ring */
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
+ if (nicvf_init_rbdr(nic, &qs->rbdr[qidx], qs->rbdr_len,
+ DMA_BUFFER_LEN))
+ goto alloc_fail;
+ }
+
+ /* Alloc send queue */
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++) {
+ if (nicvf_init_snd_queue(nic, &qs->sq[qidx], qs->sq_len))
+ goto alloc_fail;
+ }
+
+ /* Alloc completion queue */
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
+ if (nicvf_init_cmp_queue(nic, &qs->cq[qidx], qs->cq_len))
+ goto alloc_fail;
+ }
+
+ return 0;
+alloc_fail:
+ nicvf_free_resources(nic);
+ return -ENOMEM;
+}
+
+int nicvf_set_qset_resources(struct nicvf *nic)
+{
+ struct queue_set *qs;
+
+ qs = devm_kzalloc(&nic->pdev->dev, sizeof(*qs), GFP_KERNEL);
+ if (!qs)
+ return -ENOMEM;
+ nic->qs = qs;
+
+ /* Set count of each queue */
+ qs->rbdr_cnt = RBDR_CNT;
+ qs->rq_cnt = RCV_QUEUE_CNT;
+ qs->sq_cnt = SND_QUEUE_CNT;
+ qs->cq_cnt = CMP_QUEUE_CNT;
+
+ /* Set queue lengths */
+ qs->rbdr_len = RCV_BUF_COUNT;
+ qs->sq_len = SND_QUEUE_LEN;
+ qs->cq_len = CMP_QUEUE_LEN;
+ return 0;
+}
+
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
+{
+ bool disable = false;
+ struct queue_set *qs = nic->qs;
+ int qidx;
+
+ if (!qs)
+ return 0;
+
+ if (enable) {
+ if (nicvf_alloc_resources(nic))
+ return -ENOMEM;
+
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_snd_queue_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_cmp_queue_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_rbdr_config(nic, qs, qidx, enable);
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_rcv_queue_config(nic, qs, qidx, enable);
+ } else {
+ for (qidx = 0; qidx < qs->rq_cnt; qidx++)
+ nicvf_rcv_queue_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
+ nicvf_rbdr_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->sq_cnt; qidx++)
+ nicvf_snd_queue_config(nic, qs, qidx, disable);
+ for (qidx = 0; qidx < qs->cq_cnt; qidx++)
+ nicvf_cmp_queue_config(nic, qs, qidx, disable);
+
+ nicvf_free_resources(nic);
+ }
+
+ return 0;
+}
+
+/* Get a free desc from SQ
+ * returns descriptor pointer & descriptor number
+ */
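+/* sq->tail is the producer index, advanced here when descriptors are
+ * handed out; sq->head is the consumer index, advanced in
+ * nicvf_put_sq_desc() on the cleanup path. free_cnt is atomic because
+ * the submit and completion sides can run concurrently.
+ */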
+static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+ int qentry;
+
+ qentry = sq->tail;
+ atomic_sub(desc_cnt, &sq->free_cnt);
+ sq->tail += desc_cnt;
+ sq->tail &= (sq->dmem.q_len - 1);
+
+ return qentry;
+}
+
+/* Free descriptor back to SQ for future use */
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
+{
+ atomic_add(desc_cnt, &sq->free_cnt);
+ sq->head += desc_cnt;
+ sq->head &= (sq->dmem.q_len - 1);
+}
+
+static inline int nicvf_get_nxt_sqentry(struct snd_queue *sq, int qentry)
+{
+ qentry++;
+ qentry &= (sq->dmem.q_len - 1);
+ return qentry;
+}
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx)
+{
+ u64 sq_cfg;
+
+ sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+ sq_cfg |= NICVF_SQ_EN;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+ /* Ring doorbell so that H/W restarts processing SQEs */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0);
+}
+
+void nicvf_sq_disable(struct nicvf *nic, int qidx)
+{
+ u64 sq_cfg;
+
+ sq_cfg = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx);
+ sq_cfg &= ~NICVF_SQ_EN;
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg);
+}
+
+void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
+ int qidx)
+{
+ u64 head, tail;
+ struct sk_buff *skb;
+ struct nicvf *nic = netdev_priv(netdev);
+ struct sq_hdr_subdesc *hdr;
+
+ head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4;
+ tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4;
+ while (sq->head != head) {
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+ if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER) {
+ nicvf_put_sq_desc(sq, 1);
+ continue;
+ }
+ skb = (struct sk_buff *)sq->skbuff[sq->head];
+ atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
+ atomic64_add(hdr->tot_len,
+ (atomic64_t *)&netdev->stats.tx_bytes);
+ dev_kfree_skb_any(skb);
+ nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
+ }
+}
+
+/* Calculate the number of SQ subdescriptors needed to transmit all
+ * segments of this TSO packet.
+ * Taken from 'Tilera network driver' with a minor modification.
+ */
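+/* Worked example (assuming a linear skb): with gso_size 1400 and 2800
+ * bytes of payload, gso_segs = 2; each segment costs one edesc for the
+ * header and one for its payload piece (num_edescs = 4), plus one
+ * SQ_HDR_SUBDESC per segment, for 6 subdescriptors in total.
+ */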
+static int nicvf_tso_count_subdescs(struct sk_buff *skb)
+{
+ struct skb_shared_info *sh = skb_shinfo(skb);
+ unsigned int sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ unsigned int data_len = skb->len - sh_len;
+ unsigned int p_len = sh->gso_size;
+ long f_id = -1; /* id of the current fragment */
+ long f_size = skb_headlen(skb) - sh_len; /* current fragment size */
+ long f_used = 0; /* bytes used from the current fragment */
+ long n; /* size of the current piece of payload */
+ int num_edescs = 0;
+ int segment;
+
+ for (segment = 0; segment < sh->gso_segs; segment++) {
+ unsigned int p_used = 0;
+
+ /* One edesc for header and for each piece of the payload. */
+ for (num_edescs++; p_used < p_len; num_edescs++) {
+ /* Advance as needed. */
+ while (f_used >= f_size) {
+ f_id++;
+ f_size = skb_frag_size(&sh->frags[f_id]);
+ f_used = 0;
+ }
+
+ /* Use bytes from the current fragment. */
+ n = p_len - p_used;
+ if (n > f_size - f_used)
+ n = f_size - f_used;
+ f_used += n;
+ p_used += n;
+ }
+
+ /* The last segment may be less than gso_size. */
+ data_len -= p_len;
+ if (data_len < p_len)
+ p_len = data_len;
+ }
+
+ /* '+ gso_segs' for SQ_HDR_SUBDESCs for each segment */
+ return num_edescs + sh->gso_segs;
+}
+
+/* Get the number of SQ descriptors needed to xmit this skb */
+static int nicvf_sq_subdesc_required(struct nicvf *nic, struct sk_buff *skb)
+{
+ int subdesc_cnt = MIN_SQ_DESC_PER_PKT_XMIT;
+
+ if (skb_shinfo(skb)->gso_size) {
+ subdesc_cnt = nicvf_tso_count_subdescs(skb);
+ return subdesc_cnt;
+ }
+
+ if (skb_shinfo(skb)->nr_frags)
+ subdesc_cnt += skb_shinfo(skb)->nr_frags;
+
+ return subdesc_cnt;
+}
+
+/* Add SQ HEADER subdescriptor.
+ * First subdescriptor for every send descriptor.
+ */
+static inline void
+nicvf_sq_add_hdr_subdesc(struct snd_queue *sq, int qentry,
+ int subdesc_cnt, struct sk_buff *skb, int len)
+{
+ int proto;
+ struct sq_hdr_subdesc *hdr;
+
+ hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, qentry);
+ sq->skbuff[qentry] = (u64)skb;
+
+ memset(hdr, 0, SND_QUEUE_DESC_SIZE);
+ hdr->subdesc_type = SQ_DESC_TYPE_HEADER;
+ /* Enable notification via CQE after processing SQE */
+ hdr->post_cqe = 1;
+ /* Number of subdescriptors following this */
+ hdr->subdesc_cnt = subdesc_cnt;
+ hdr->tot_len = len;
+
+ /* Offload checksum calculation to HW */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ if (skb->protocol != htons(ETH_P_IP))
+ return;
+
+ hdr->csum_l3 = 1; /* Enable IP csum calculation */
+ hdr->l3_offset = skb_network_offset(skb);
+ hdr->l4_offset = skb_transport_offset(skb);
+
+ proto = ip_hdr(skb)->protocol;
+ switch (proto) {
+ case IPPROTO_TCP:
+ hdr->csum_l4 = SEND_L4_CSUM_TCP;
+ break;
+ case IPPROTO_UDP:
+ hdr->csum_l4 = SEND_L4_CSUM_UDP;
+ break;
+ case IPPROTO_SCTP:
+ hdr->csum_l4 = SEND_L4_CSUM_SCTP;
+ break;
+ }
+ }
+}
+
+/* SQ GATHER subdescriptor
+ * Must follow HDR descriptor
+ */
+static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
+ int size, u64 data)
+{
+ struct sq_gather_subdesc *gather;
+
+ qentry &= (sq->dmem.q_len - 1);
+ gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, qentry);
+
+ memset(gather, 0, SND_QUEUE_DESC_SIZE);
+ gather->subdesc_type = SQ_DESC_TYPE_GATHER;
+ gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB;
+ gather->size = size;
+ gather->addr = data;
+}
+
+/* Segment a TSO packet into 'gso_size' segments and append
+ * them to SQ for transfer
+ */
+static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
+ int qentry, struct sk_buff *skb)
+{
+ struct tso_t tso;
+ int seg_subdescs = 0, desc_cnt = 0;
+ int seg_len, total_len, data_left;
+ int hdr_qentry = qentry;
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ tso_start(skb, &tso);
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ /* Save Qentry for adding HDR_SUBDESC at the end */
+ hdr_qentry = qentry;
+
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+
+ /* Add segment's header */
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ hdr = sq->tso_hdrs + qentry * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ nicvf_sq_add_gather_subdesc(sq, qentry, hdr_len,
+ sq->tso_hdrs_phys +
+ qentry * TSO_HEADER_SIZE);
+ /* HDR_SUBDESC + GATHER */
+ seg_subdescs = 2;
+ seg_len = hdr_len;
+
+ /* Add segment's payload fragments */
+ while (data_left > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_left);
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ nicvf_sq_add_gather_subdesc(sq, qentry, size,
+ virt_to_phys(tso.data));
+ seg_subdescs++;
+ seg_len += size;
+
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+ nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
+ seg_subdescs - 1, skb, seg_len);
+ sq->skbuff[hdr_qentry] = 0;
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+
+ desc_cnt += seg_subdescs;
+ }
+ /* Save SKB in the last segment for freeing */
+ sq->skbuff[hdr_qentry] = (u64)skb;
+
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit all TSO segments */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ skb_get_queue_mapping(skb), desc_cnt);
+ return 1;
+}
+
+/* Append an skb to a SQ for packet transfer. */
+int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb)
+{
+ int i, size;
+ int subdesc_cnt;
+ int sq_num, qentry;
+ struct queue_set *qs = nic->qs;
+ struct snd_queue *sq;
+
+ sq_num = skb_get_queue_mapping(skb);
+ sq = &qs->sq[sq_num];
+
+ subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
+ if (subdesc_cnt > atomic_read(&sq->free_cnt))
+ goto append_fail;
+
+ qentry = nicvf_get_sq_desc(sq, subdesc_cnt);
+
+ /* Check if it's a TSO packet */
+ if (skb_shinfo(skb)->gso_size)
+ return nicvf_sq_append_tso(nic, sq, qentry, skb);
+
+ /* Add SQ header subdesc */
+ nicvf_sq_add_hdr_subdesc(sq, qentry, subdesc_cnt - 1, skb, skb->len);
+
+ /* Add SQ gather subdescs */
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
+ nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
+
+ /* Check for scattered buffer */
+ if (!skb_is_nonlinear(skb))
+ goto doorbell;
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ const struct skb_frag_struct *frag;
+
+ frag = &skb_shinfo(skb)->frags[i];
+
+ qentry = nicvf_get_nxt_sqentry(sq, qentry);
+ size = skb_frag_size(frag);
+ nicvf_sq_add_gather_subdesc(sq, qentry, size,
+ virt_to_phys(
+ skb_frag_address(frag)));
+ }
+
+doorbell:
+ /* make sure all memory stores are done before ringing doorbell */
+ smp_wmb();
+
+ /* Inform HW to xmit new packet */
+ nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
+ sq_num, subdesc_cnt);
+ return 1;
+
+append_fail:
+ netdev_dbg(nic->netdev, "Not enough SQ descriptors to xmit pkt\n");
+ return 0;
+}
+
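+/* rb_lens[] holds u16 lengths packed into the CQE's 64-bit words; on
+ * big-endian machines the four u16s within each u64 appear in reverse
+ * order, which is presumably what the remapping below compensates for.
+ */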
+static inline unsigned frag_num(unsigned i)
+{
+#ifdef __BIG_ENDIAN
+ return (i & ~3) + 3 - (i & 3);
+#else
+ return i;
+#endif
+}
+
+/* Returns SKB for a received packet */
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
+{
+ int frag;
+ int payload_len = 0;
+ struct sk_buff *skb = NULL;
+ struct sk_buff *skb_frag = NULL;
+ struct sk_buff *prev_frag = NULL;
+ u16 *rb_lens = NULL;
+ u64 *rb_ptrs = NULL;
+
+ rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
+ rb_ptrs = (void *)cqe_rx + (6 * sizeof(u64));
+
+ netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
+ __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
+
+ for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
+ payload_len = rb_lens[frag_num(frag)];
+ if (!frag) {
+ /* First fragment */
+ skb = nicvf_rb_ptr_to_skb(nic,
+ *rb_ptrs - cqe_rx->align_pad,
+ payload_len);
+ if (!skb)
+ return NULL;
+ skb_reserve(skb, cqe_rx->align_pad);
+ skb_put(skb, payload_len);
+ } else {
+ /* Add fragments */
+ skb_frag = nicvf_rb_ptr_to_skb(nic, *rb_ptrs,
+ payload_len);
+ if (!skb_frag) {
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
+ if (!skb_shinfo(skb)->frag_list)
+ skb_shinfo(skb)->frag_list = skb_frag;
+ else
+ prev_frag->next = skb_frag;
+
+ prev_frag = skb_frag;
+ skb->len += payload_len;
+ skb->data_len += payload_len;
+ skb_frag->len = payload_len;
+ }
+ /* Next buffer pointer */
+ rb_ptrs++;
+ }
+ return skb;
+}
+
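+/* The ENA_W1S/ENA_W1C register names suggest the usual write-1-to-set /
+ * write-1-to-clear convention: enabling writes set bits via W1S,
+ * disabling writes clear them via W1C, and writing a bit back to
+ * NIC_VF_INT acknowledges a pending interrupt.
+ */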
+/* Enable interrupt */
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val;
+
+ reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to enable interrupt: unknown type\n");
+ break;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_ENA_W1S, reg_val);
+}
+
+/* Disable interrupt */
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val = 0;
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val |= ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val |= (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val |= (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val |= (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val |= (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to disable interrupt: unknown type\n");
+ break;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_ENA_W1C, reg_val);
+}
+
+/* Clear interrupt */
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val = 0;
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ reg_val = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ reg_val = (1ULL << NICVF_INTR_PKT_DROP_SHIFT);
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ reg_val = (1ULL << NICVF_INTR_TCP_TIMER_SHIFT);
+ break;
+ case NICVF_INTR_MBOX:
+ reg_val = (1ULL << NICVF_INTR_MBOX_SHIFT);
+ break;
+ case NICVF_INTR_QS_ERR:
+ reg_val = (1ULL << NICVF_INTR_QS_ERR_SHIFT);
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to clear interrupt: unknown type\n");
+ break;
+ }
+
+ nicvf_reg_write(nic, NIC_VF_INT, reg_val);
+}
+
+/* Check if interrupt is enabled */
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx)
+{
+ u64 reg_val;
+ u64 mask = 0xff;
+
+ reg_val = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
+
+ switch (int_type) {
+ case NICVF_INTR_CQ:
+ mask = ((1ULL << q_idx) << NICVF_INTR_CQ_SHIFT);
+ break;
+ case NICVF_INTR_SQ:
+ mask = ((1ULL << q_idx) << NICVF_INTR_SQ_SHIFT);
+ break;
+ case NICVF_INTR_RBDR:
+ mask = ((1ULL << q_idx) << NICVF_INTR_RBDR_SHIFT);
+ break;
+ case NICVF_INTR_PKT_DROP:
+ mask = NICVF_INTR_PKT_DROP_MASK;
+ break;
+ case NICVF_INTR_TCP_TIMER:
+ mask = NICVF_INTR_TCP_TIMER_MASK;
+ break;
+ case NICVF_INTR_MBOX:
+ mask = NICVF_INTR_MBOX_MASK;
+ break;
+ case NICVF_INTR_QS_ERR:
+ mask = NICVF_INTR_QS_ERR_MASK;
+ break;
+ default:
+ netdev_err(nic->netdev,
+ "Failed to check interrupt enable: unknown type\n");
+ break;
+ }
+
+ return (reg_val & mask);
+}
+
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx)
+{
+ struct rcv_queue *rq;
+
+#define GET_RQ_STATS(reg) \
+ nicvf_reg_read(nic, NIC_QSET_RQ_0_7_STAT_0_1 |\
+ (rq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
+
+ rq = &nic->qs->rq[rq_idx];
+ rq->stats.bytes = GET_RQ_STATS(RQ_SQ_STATS_OCTS);
+ rq->stats.pkts = GET_RQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx)
+{
+ struct snd_queue *sq;
+
+#define GET_SQ_STATS(reg) \
+ nicvf_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1 |\
+ (sq_idx << NIC_Q_NUM_SHIFT) | (reg << 3))
+
+ sq = &nic->qs->sq[sq_idx];
+ sq->stats.bytes = GET_SQ_STATS(RQ_SQ_STATS_OCTS);
+ sq->stats.pkts = GET_SQ_STATS(RQ_SQ_STATS_PKTS);
+}
+
+/* Check for errors in the receive completion queue entry */
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
+{
+ struct cmp_queue_stats *stats = &cq->stats;
+
+ if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
+ stats->rx.errop.good++;
+ return 0;
+ }
+
+ if (netif_msg_rx_err(nic))
+ netdev_err(nic->netdev,
+ "%s: RX error CQE err_level 0x%x err_opcode 0x%x\n",
+ nic->netdev->name,
+ cqe_rx->err_level, cqe_rx->err_opcode);
+
+ switch (cqe_rx->err_level) {
+ case CQ_ERRLVL_MAC:
+ stats->rx.errlvl.mac_errs++;
+ break;
+ case CQ_ERRLVL_L2:
+ stats->rx.errlvl.l2_errs++;
+ break;
+ case CQ_ERRLVL_L3:
+ stats->rx.errlvl.l3_errs++;
+ break;
+ case CQ_ERRLVL_L4:
+ stats->rx.errlvl.l4_errs++;
+ break;
+ }
+
+ switch (cqe_rx->err_opcode) {
+ case CQ_RX_ERROP_RE_PARTIAL:
+ stats->rx.errop.partial_pkts++;
+ break;
+ case CQ_RX_ERROP_RE_JABBER:
+ stats->rx.errop.jabber_errs++;
+ break;
+ case CQ_RX_ERROP_RE_FCS:
+ stats->rx.errop.fcs_errs++;
+ break;
+ case CQ_RX_ERROP_RE_TERMINATE:
+ stats->rx.errop.terminate_errs++;
+ break;
+ case CQ_RX_ERROP_RE_RX_CTL:
+ stats->rx.errop.bgx_rx_errs++;
+ break;
+ case CQ_RX_ERROP_PREL2_ERR:
+ stats->rx.errop.prel2_errs++;
+ break;
+ case CQ_RX_ERROP_L2_FRAGMENT:
+ stats->rx.errop.l2_frags++;
+ break;
+ case CQ_RX_ERROP_L2_OVERRUN:
+ stats->rx.errop.l2_overruns++;
+ break;
+ case CQ_RX_ERROP_L2_PFCS:
+ stats->rx.errop.l2_pfcs++;
+ break;
+ case CQ_RX_ERROP_L2_PUNY:
+ stats->rx.errop.l2_puny++;
+ break;
+ case CQ_RX_ERROP_L2_MAL:
+ stats->rx.errop.l2_hdr_malformed++;
+ break;
+ case CQ_RX_ERROP_L2_OVERSIZE:
+ stats->rx.errop.l2_oversize++;
+ break;
+ case CQ_RX_ERROP_L2_UNDERSIZE:
+ stats->rx.errop.l2_undersize++;
+ break;
+ case CQ_RX_ERROP_L2_LENMISM:
+ stats->rx.errop.l2_len_mismatch++;
+ break;
+ case CQ_RX_ERROP_L2_PCLP:
+ stats->rx.errop.l2_pclp++;
+ break;
+ case CQ_RX_ERROP_IP_NOT:
+ stats->rx.errop.non_ip++;
+ break;
+ case CQ_RX_ERROP_IP_CSUM_ERR:
+ stats->rx.errop.ip_csum_err++;
+ break;
+ case CQ_RX_ERROP_IP_MAL:
+ stats->rx.errop.ip_hdr_malformed++;
+ break;
+ case CQ_RX_ERROP_IP_MALD:
+ stats->rx.errop.ip_payload_malformed++;
+ break;
+ case CQ_RX_ERROP_IP_HOP:
+ stats->rx.errop.ip_hop_errs++;
+ break;
+ case CQ_RX_ERROP_L3_ICRC:
+ stats->rx.errop.l3_icrc_errs++;
+ break;
+ case CQ_RX_ERROP_L3_PCLP:
+ stats->rx.errop.l3_pclp++;
+ break;
+ case CQ_RX_ERROP_L4_MAL:
+ stats->rx.errop.l4_malformed++;
+ break;
+ case CQ_RX_ERROP_L4_CHK:
+ stats->rx.errop.l4_csum_errs++;
+ break;
+ case CQ_RX_ERROP_UDP_LEN:
+ stats->rx.errop.udp_len_err++;
+ break;
+ case CQ_RX_ERROP_L4_PORT:
+ stats->rx.errop.bad_l4_port++;
+ break;
+ case CQ_RX_ERROP_TCP_FLAG:
+ stats->rx.errop.bad_tcp_flag++;
+ break;
+ case CQ_RX_ERROP_TCP_OFFSET:
+ stats->rx.errop.tcp_offset_errs++;
+ break;
+ case CQ_RX_ERROP_L4_PCLP:
+ stats->rx.errop.l4_pclp++;
+ break;
+ case CQ_RX_ERROP_RBDR_TRUNC:
+ stats->rx.errop.pkt_truncated++;
+ break;
+ }
+
+ return 1;
+}
+
+/* Check for errors in the send completion queue entry */
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_send_t *cqe_tx)
+{
+ struct cmp_queue_stats *stats = &cq->stats;
+
+ switch (cqe_tx->send_status) {
+ case CQ_TX_ERROP_GOOD:
+ stats->tx.good++;
+ return 0;
+ case CQ_TX_ERROP_DESC_FAULT:
+ stats->tx.desc_fault++;
+ break;
+ case CQ_TX_ERROP_HDR_CONS_ERR:
+ stats->tx.hdr_cons_err++;
+ break;
+ case CQ_TX_ERROP_SUBDC_ERR:
+ stats->tx.subdesc_err++;
+ break;
+ case CQ_TX_ERROP_IMM_SIZE_OFLOW:
+ stats->tx.imm_size_oflow++;
+ break;
+ case CQ_TX_ERROP_DATA_SEQUENCE_ERR:
+ stats->tx.data_seq_err++;
+ break;
+ case CQ_TX_ERROP_MEM_SEQUENCE_ERR:
+ stats->tx.mem_seq_err++;
+ break;
+ case CQ_TX_ERROP_LOCK_VIOL:
+ stats->tx.lock_viol++;
+ break;
+ case CQ_TX_ERROP_DATA_FAULT:
+ stats->tx.data_fault++;
+ break;
+ case CQ_TX_ERROP_TSTMP_CONFLICT:
+ stats->tx.tstmp_conflict++;
+ break;
+ case CQ_TX_ERROP_TSTMP_TIMEOUT:
+ stats->tx.tstmp_timeout++;
+ break;
+ case CQ_TX_ERROP_MEM_FAULT:
+ stats->tx.mem_fault++;
+ break;
+ case CQ_TX_ERROP_CK_OVERLAP:
+ stats->tx.csum_overlap++;
+ break;
+ case CQ_TX_ERROP_CK_OFLOW:
+ stats->tx.csum_overflow++;
+ break;
+ }
+
+ return 1;
+}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
new file mode 100644
index 0000000..8341bdf
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -0,0 +1,381 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef NICVF_QUEUES_H
+#define NICVF_QUEUES_H
+
+#include <linux/netdevice.h>
+#include "q_struct.h"
+
+#define MAX_QUEUE_SET 128
+#define MAX_RCV_QUEUES_PER_QS 8
+#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
+#define MAX_SND_QUEUES_PER_QS 8
+#define MAX_CMP_QUEUES_PER_QS 8
+
+/* VF's queue interrupt ranges */
+#define NICVF_INTR_ID_CQ 0
+#define NICVF_INTR_ID_SQ 8
+#define NICVF_INTR_ID_RBDR 16
+#define NICVF_INTR_ID_MISC 18
+#define NICVF_INTR_ID_QS_ERR 19
+
+#define for_each_cq_irq(irq) \
+ for (irq = NICVF_INTR_ID_CQ; irq < NICVF_INTR_ID_SQ; irq++)
+#define for_each_sq_irq(irq) \
+ for (irq = NICVF_INTR_ID_SQ; irq < NICVF_INTR_ID_RBDR; irq++)
+#define for_each_rbdr_irq(irq) \
+ for (irq = NICVF_INTR_ID_RBDR; irq < NICVF_INTR_ID_MISC; irq++)
+
+#define RBDR_SIZE0 0ULL /* 8K entries */
+#define RBDR_SIZE1 1ULL /* 16K entries */
+#define RBDR_SIZE2 2ULL /* 32K entries */
+#define RBDR_SIZE3 3ULL /* 64K entries */
+#define RBDR_SIZE4 4ULL /* 128K entries */
+#define RBDR_SIZE5 5ULL /* 256K entries */
+#define RBDR_SIZE6 6ULL /* 512K entries */
+
+#define SND_QUEUE_SIZE0 0ULL /* 1K entries */
+#define SND_QUEUE_SIZE1 1ULL /* 2K entries */
+#define SND_QUEUE_SIZE2 2ULL /* 4K entries */
+#define SND_QUEUE_SIZE3 3ULL /* 8K entries */
+#define SND_QUEUE_SIZE4 4ULL /* 16K entries */
+#define SND_QUEUE_SIZE5 5ULL /* 32K entries */
+#define SND_QUEUE_SIZE6 6ULL /* 64K entries */
+
+#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */
+#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */
+#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */
+#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */
+#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */
+#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */
+#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */
+
+/* Default queue counts per QS, queue lengths and threshold values */
+#define RBDR_CNT 1
+#define RCV_QUEUE_CNT 8
+#define SND_QUEUE_CNT 8
+#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
+
+#define SND_QSIZE SND_QUEUE_SIZE4
+#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
+#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
+#define SND_QUEUE_THRESH 2ULL
+#define MIN_SQ_DESC_PER_PKT_XMIT 2
+/* Timestamp is not enabled; if it were, this would be 2 */
+#define MAX_CQE_PER_PKT_XMIT 1
+
+#define CMP_QSIZE CMP_QUEUE_SIZE4
+#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
+#define CMP_QUEUE_CQE_THRESH 0
+#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
+
+#define RBDR_SIZE RBDR_SIZE0
+#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
+#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
+#define RBDR_THRESH (RCV_BUF_COUNT / 2)
+#define DMA_BUFFER_LEN 2048 /* In multiples of 128 bytes */
+#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \
+ (NICVF_RCV_BUF_ALIGN_BYTES * 2))
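+/* Per-buffer allocation size (a sketch of the intent): the DMA area
+ * plus room for struct skb_shared_info and alignment padding, the
+ * usual build_skb-style layout, so a fragment can directly back an skb.
+ */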
+#define RCV_DATA_OFFSET NICVF_RCV_BUF_ALIGN_BYTES
+
+#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
+ MAX_CQE_PER_PKT_XMIT)
+#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)
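+/* Worked example with the defaults above (a sketch): SND_QSIZE 4
+ * gives a 16K-entry SQ; at MIN_SQ_DESC_PER_PKT_XMIT (2) descriptors
+ * per packet up to 8K packets are in flight, each posting
+ * MAX_CQE_PER_PKT_XMIT (1) CQE, so MAX_CQES_FOR_TX is 8K and
+ * RQ_CQ_DROP is (16K - 8K) / 256 = 32.
+ */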
+
+/* Descriptor size in bytes */
+#define SND_QUEUE_DESC_SIZE 16
+#define CMP_QUEUE_DESC_SIZE 512
+
+/* Buffer / descriptor alignments */
+#define NICVF_RCV_BUF_ALIGN 7
+#define NICVF_RCV_BUF_ALIGN_BYTES (1ULL << NICVF_RCV_BUF_ALIGN)
+#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */
+#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */
+
+#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES)
+#define NICVF_ADDR_ALIGN_LEN(ADDR, BYTES)\
+ (NICVF_ALIGNED_ADDR(ADDR, BYTES) - BYTES)
+#define NICVF_RCV_BUF_ALIGN_LEN(X)\
+ (NICVF_ALIGNED_ADDR(X, NICVF_RCV_BUF_ALIGN_BYTES) - X)
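+/* Example (a sketch): for X = 0x1010, NICVF_ALIGNED_ADDR() rounds up
+ * to 0x1080, so NICVF_RCV_BUF_ALIGN_LEN(X) is 0x70 -- the padding
+ * needed to reach the next 128-byte boundary.
+ */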
+
+/* Queue enable/disable */
+#define NICVF_SQ_EN BIT_ULL(19)
+
+/* Queue reset */
+#define NICVF_CQ_RESET BIT_ULL(41)
+#define NICVF_SQ_RESET BIT_ULL(17)
+#define NICVF_RBDR_RESET BIT_ULL(43)
+
+enum CQ_RX_ERRLVL_E {
+ CQ_ERRLVL_MAC,
+ CQ_ERRLVL_L2,
+ CQ_ERRLVL_L3,
+ CQ_ERRLVL_L4,
+};
+
+enum CQ_RX_ERROP_E {
+ CQ_RX_ERROP_RE_NONE = 0x0,
+ CQ_RX_ERROP_RE_PARTIAL = 0x1,
+ CQ_RX_ERROP_RE_JABBER = 0x2,
+ CQ_RX_ERROP_RE_FCS = 0x7,
+ CQ_RX_ERROP_RE_TERMINATE = 0x9,
+ CQ_RX_ERROP_RE_RX_CTL = 0xb,
+ CQ_RX_ERROP_PREL2_ERR = 0x1f,
+ CQ_RX_ERROP_L2_FRAGMENT = 0x20,
+ CQ_RX_ERROP_L2_OVERRUN = 0x21,
+ CQ_RX_ERROP_L2_PFCS = 0x22,
+ CQ_RX_ERROP_L2_PUNY = 0x23,
+ CQ_RX_ERROP_L2_MAL = 0x24,
+ CQ_RX_ERROP_L2_OVERSIZE = 0x25,
+ CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
+ CQ_RX_ERROP_L2_LENMISM = 0x27,
+ CQ_RX_ERROP_L2_PCLP = 0x28,
+ CQ_RX_ERROP_IP_NOT = 0x41,
+ CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
+ CQ_RX_ERROP_IP_MAL = 0x43,
+ CQ_RX_ERROP_IP_MALD = 0x44,
+ CQ_RX_ERROP_IP_HOP = 0x45,
+ CQ_RX_ERROP_L3_ICRC = 0x46,
+ CQ_RX_ERROP_L3_PCLP = 0x47,
+ CQ_RX_ERROP_L4_MAL = 0x61,
+ CQ_RX_ERROP_L4_CHK = 0x62,
+ CQ_RX_ERROP_UDP_LEN = 0x63,
+ CQ_RX_ERROP_L4_PORT = 0x64,
+ CQ_RX_ERROP_TCP_FLAG = 0x65,
+ CQ_RX_ERROP_TCP_OFFSET = 0x66,
+ CQ_RX_ERROP_L4_PCLP = 0x67,
+ CQ_RX_ERROP_RBDR_TRUNC = 0x70,
+};
+
+enum CQ_TX_ERROP_E {
+ CQ_TX_ERROP_GOOD = 0x0,
+ CQ_TX_ERROP_DESC_FAULT = 0x10,
+ CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
+ CQ_TX_ERROP_SUBDC_ERR = 0x12,
+ CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
+ CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
+ CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
+ CQ_TX_ERROP_LOCK_VIOL = 0x83,
+ CQ_TX_ERROP_DATA_FAULT = 0x84,
+ CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
+ CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
+ CQ_TX_ERROP_MEM_FAULT = 0x87,
+ CQ_TX_ERROP_CK_OVERLAP = 0x88,
+ CQ_TX_ERROP_CK_OFLOW = 0x89,
+ CQ_TX_ERROP_ENUM_LAST = 0x8a,
+};
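+/* Each non-GOOD opcode above maps one-to-one to a counter in the
+ * tx_stats block below and to a case in nicvf_check_cqe_tx_errs().
+ */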
+
+struct cmp_queue_stats {
+ struct rx_stats {
+ struct {
+ u64 mac_errs;
+ u64 l2_errs;
+ u64 l3_errs;
+ u64 l4_errs;
+ } errlvl;
+ struct {
+ u64 good;
+ u64 partial_pkts;
+ u64 jabber_errs;
+ u64 fcs_errs;
+ u64 terminate_errs;
+ u64 bgx_rx_errs;
+ u64 prel2_errs;
+ u64 l2_frags;
+ u64 l2_overruns;
+ u64 l2_pfcs;
+ u64 l2_puny;
+ u64 l2_hdr_malformed;
+ u64 l2_oversize;
+ u64 l2_undersize;
+ u64 l2_len_mismatch;
+ u64 l2_pclp;
+ u64 non_ip;
+ u64 ip_csum_err;
+ u64 ip_hdr_malformed;
+ u64 ip_payload_malformed;
+ u64 ip_hop_errs;
+ u64 l3_icrc_errs;
+ u64 l3_pclp;
+ u64 l4_malformed;
+ u64 l4_csum_errs;
+ u64 udp_len_err;
+ u64 bad_l4_port;
+ u64 bad_tcp_flag;
+ u64 tcp_offset_errs;
+ u64 l4_pclp;
+ u64 pkt_truncated;
+ } errop;
+ } rx;
+ struct tx_stats {
+ u64 good;
+ u64 desc_fault;
+ u64 hdr_cons_err;
+ u64 subdesc_err;
+ u64 imm_size_oflow;
+ u64 data_seq_err;
+ u64 mem_seq_err;
+ u64 lock_viol;
+ u64 data_fault;
+ u64 tstmp_conflict;
+ u64 tstmp_timeout;
+ u64 mem_fault;
+ u64 csum_overlap;
+ u64 csum_overflow;
+ } tx;
+} ____cacheline_aligned_in_smp;
+
+enum RQ_SQ_STATS {
+ RQ_SQ_STATS_OCTS,
+ RQ_SQ_STATS_PKTS,
+};
+
+struct rx_tx_queue_stats {
+ u64 bytes;
+ u64 pkts;
+} ____cacheline_aligned_in_smp;
+
+struct q_desc_mem {
+ dma_addr_t dma;
+ u64 size;
+ u16 q_len;
+ dma_addr_t phys_base;
+ void *base;
+ void *unalign_base;
+};
+
+struct rbdr {
+ bool enable;
+ u32 dma_size;
+ u32 frag_len;
+ u32 thresh; /* Threshold level for interrupt */
+ void *desc;
+ u32 head;
+ u32 tail;
+ struct q_desc_mem dmem;
+} ____cacheline_aligned_in_smp;
+
+struct rcv_queue {
+ bool enable;
+ struct rbdr *rbdr_start;
+ struct rbdr *rbdr_cont;
+ bool en_tcp_reassembly;
+ u8 cq_qs; /* CQ's QS to which this RQ is assigned */
+ u8 cq_idx; /* CQ index (0 to 7) in the QS */
+ u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */
+ u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */
+ u8 start_rbdr_qs; /* First buffer ptrs - QS num */
+ u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
+ u8 caching;
+ struct rx_tx_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct cmp_queue {
+ bool enable;
+ u16 thresh;
+ spinlock_t lock; /* lock to serialize processing CQEs */
+ void *desc;
+ struct q_desc_mem dmem;
+ struct cmp_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct snd_queue {
+ bool enable;
+ u8 cq_qs; /* CQ's QS to which this SQ is pointing */
+ u8 cq_idx; /* CQ index (0 to 7) in the above QS */
+ u16 thresh;
+ atomic_t free_cnt;
+ u32 head;
+ u32 tail;
+ u64 *skbuff;
+ void *desc;
+
+#define TSO_HEADER_SIZE 128
+ /* For TSO segment's header */
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_phys;
+
+ cpumask_t affinity_mask;
+ struct q_desc_mem dmem;
+ struct rx_tx_queue_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct queue_set {
+ bool enable;
+ bool be_en;
+ u8 vnic_id;
+ u8 rq_cnt;
+ u8 cq_cnt;
+ u64 cq_len;
+ u8 sq_cnt;
+ u64 sq_len;
+ u8 rbdr_cnt;
+ u64 rbdr_len;
+ struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
+ struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
+ struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
+ struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
+} ____cacheline_aligned_in_smp;
+
+#define GET_RBDR_DESC(RING, idx)\
+ (&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
+#define GET_SQ_DESC(RING, idx)\
+ (&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
+#define GET_CQ_DESC(RING, idx)\
+ (&(((union cq_desc_t *)((RING)->desc))[idx]))
+
+/* CQ status bits */
+#define CQ_WR_FULL BIT(26)
+#define CQ_WR_DISABLE BIT(25)
+#define CQ_WR_FAULT BIT(24)
+#define CQ_CQE_COUNT (0xFFFF << 0)
+
+#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
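+/* Usage sketch: after reading a CQ status register, errors can be
+ * tested with (status & CQ_ERR_MASK) and the count of pending CQEs
+ * extracted with (status & CQ_CQE_COUNT).
+ */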
+
+int nicvf_set_qset_resources(struct nicvf *nic);
+int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
+void nicvf_qset_config(struct nicvf *nic, bool enable);
+void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
+ int qidx, bool enable);
+
+void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
+void nicvf_sq_disable(struct nicvf *nic, int qidx);
+void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
+void nicvf_sq_free_used_descs(struct net_device *netdev,
+ struct snd_queue *sq, int qidx);
+int nicvf_sq_append_skb(struct nicvf *nic, struct sk_buff *skb);
+
+struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
+void nicvf_rbdr_task(unsigned long data);
+void nicvf_rbdr_work(struct work_struct *work);
+
+void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
+void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
+int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);
+
+/* Register access APIs */
+void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
+u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
+void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
+ u64 qidx, u64 val);
+u64 nicvf_queue_reg_read(struct nicvf *nic,
+ u64 offset, u64 qidx);
+
+/* Stats */
+void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
+void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
+int nicvf_check_cqe_tx_errs(struct nicvf *nic,
+ struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
+#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/cavium/thunder/q_struct.h b/drivers/net/ethernet/cavium/thunder/q_struct.h
new file mode 100644
index 0000000..3c1de97
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/q_struct.h
@@ -0,0 +1,701 @@
+/*
+ * This file contains HW queue descriptor formats, config register
+ * structures, etc.
+ *
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef Q_STRUCT_H
+#define Q_STRUCT_H
+
+/* Load transaction types for reading segment bytes specified by
+ * NIC_SEND_GATHER_S[LD_TYPE].
+ */
+enum nic_send_ld_type_e {
+ NIC_SEND_LD_TYPE_E_LDD = 0x0,
+ NIC_SEND_LD_TYPE_E_LDT = 0x1,
+ NIC_SEND_LD_TYPE_E_LDWB = 0x2,
+ NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
+};
+
+enum ether_type_algorithm {
+ ETYPE_ALG_NONE = 0x0,
+ ETYPE_ALG_SKIP = 0x1,
+ ETYPE_ALG_ENDPARSE = 0x2,
+ ETYPE_ALG_VLAN = 0x3,
+ ETYPE_ALG_VLAN_STRIP = 0x4,
+};
+
+enum layer3_type {
+ L3TYPE_NONE = 0x00,
+ L3TYPE_GRH = 0x01,
+ L3TYPE_IPV4 = 0x04,
+ L3TYPE_IPV4_OPTIONS = 0x05,
+ L3TYPE_IPV6 = 0x06,
+ L3TYPE_IPV6_OPTIONS = 0x07,
+ L3TYPE_ET_STOP = 0x0D,
+ L3TYPE_OTHER = 0x0E,
+};
+
+enum layer4_type {
+ L4TYPE_NONE = 0x00,
+ L4TYPE_IPSEC_ESP = 0x01,
+ L4TYPE_IPFRAG = 0x02,
+ L4TYPE_IPCOMP = 0x03,
+ L4TYPE_TCP = 0x04,
+ L4TYPE_UDP = 0x05,
+ L4TYPE_SCTP = 0x06,
+ L4TYPE_GRE = 0x07,
+ L4TYPE_ROCE_BTH = 0x08,
+ L4TYPE_OTHER = 0x0E,
+};
+
+/* CPI and RSSI configuration */
+enum cpi_algorithm_type {
+ CPI_ALG_NONE = 0x0,
+ CPI_ALG_VLAN = 0x1,
+ CPI_ALG_VLAN16 = 0x2,
+ CPI_ALG_DIFF = 0x3,
+};
+
+enum rss_algorithm_type {
+ RSS_ALG_NONE = 0x00,
+ RSS_ALG_PORT = 0x01,
+ RSS_ALG_IP = 0x02,
+ RSS_ALG_TCP_IP = 0x03,
+ RSS_ALG_UDP_IP = 0x04,
+ RSS_ALG_SCTP_IP = 0x05,
+ RSS_ALG_GRE_IP = 0x06,
+ RSS_ALG_ROCE = 0x07,
+};
+
+enum rss_hash_cfg {
+ RSS_HASH_L2ETC = 0x00,
+ RSS_HASH_IP = 0x01,
+ RSS_HASH_TCP = 0x02,
+ RSS_HASH_TCP_SYN_DIS = 0x03,
+ RSS_HASH_UDP = 0x04,
+ RSS_HASH_L4ETC = 0x05,
+ RSS_HASH_ROCE = 0x06,
+ RSS_L3_BIDI = 0x07,
+ RSS_L4_BIDI = 0x08,
+};
+
+/* Completion queue entry types */
+enum cqe_type {
+ CQE_TYPE_INVALID = 0x0,
+ CQE_TYPE_RX = 0x2,
+ CQE_TYPE_RX_SPLIT = 0x3,
+ CQE_TYPE_RX_TCP = 0x4,
+ CQE_TYPE_SEND = 0x8,
+ CQE_TYPE_SEND_PTP = 0x9,
+};
+
+enum cqe_rx_tcp_status {
+ CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
+ CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
+};
+
+enum cqe_send_status {
+ CQE_SEND_STATUS_GOOD = 0x00,
+ CQE_SEND_STATUS_DESC_FAULT = 0x01,
+ CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
+ CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
+ CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
+ CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
+ CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
+ CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
+ CQE_SEND_STATUS_LOCK_VIOL = 0x84,
+ CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
+ CQE_SEND_STATUS_DATA_FAULT = 0x86,
+ CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
+ CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
+ CQE_SEND_STATUS_MEM_FAULT = 0x89,
+ CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
+ CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
+};
+
+enum cqe_rx_tcp_end_reason {
+ CQE_RX_TCP_END_FIN_FLAG_DET = 0,
+ CQE_RX_TCP_END_INVALID_FLAG = 1,
+ CQE_RX_TCP_END_TIMEOUT = 2,
+ CQE_RX_TCP_END_OUT_OF_SEQ = 3,
+ CQE_RX_TCP_END_PKT_ERR = 4,
+ CQE_RX_TCP_END_QS_DISABLED = 0x0F,
+};
+
+/* Packet protocol level error enumeration */
+enum cqe_rx_err_level {
+ CQE_RX_ERRLVL_RE = 0x0,
+ CQE_RX_ERRLVL_L2 = 0x1,
+ CQE_RX_ERRLVL_L3 = 0x2,
+ CQE_RX_ERRLVL_L4 = 0x3,
+};
+
+/* Packet protocol level error type enumeration */
+enum cqe_rx_err_opcode {
+ CQE_RX_ERR_RE_NONE = 0x0,
+ CQE_RX_ERR_RE_PARTIAL = 0x1,
+ CQE_RX_ERR_RE_JABBER = 0x2,
+ CQE_RX_ERR_RE_FCS = 0x7,
+ CQE_RX_ERR_RE_TERMINATE = 0x9,
+ CQE_RX_ERR_RE_RX_CTL = 0xb,
+ CQE_RX_ERR_PREL2_ERR = 0x1f,
+ CQE_RX_ERR_L2_FRAGMENT = 0x20,
+ CQE_RX_ERR_L2_OVERRUN = 0x21,
+ CQE_RX_ERR_L2_PFCS = 0x22,
+ CQE_RX_ERR_L2_PUNY = 0x23,
+ CQE_RX_ERR_L2_MAL = 0x24,
+ CQE_RX_ERR_L2_OVERSIZE = 0x25,
+ CQE_RX_ERR_L2_UNDERSIZE = 0x26,
+ CQE_RX_ERR_L2_LENMISM = 0x27,
+ CQE_RX_ERR_L2_PCLP = 0x28,
+ CQE_RX_ERR_IP_NOT = 0x41,
+ CQE_RX_ERR_IP_CHK = 0x42,
+ CQE_RX_ERR_IP_MAL = 0x43,
+ CQE_RX_ERR_IP_MALD = 0x44,
+ CQE_RX_ERR_IP_HOP = 0x45,
+ CQE_RX_ERR_L3_ICRC = 0x46,
+ CQE_RX_ERR_L3_PCLP = 0x47,
+ CQE_RX_ERR_L4_MAL = 0x61,
+ CQE_RX_ERR_L4_CHK = 0x62,
+ CQE_RX_ERR_UDP_LEN = 0x63,
+ CQE_RX_ERR_L4_PORT = 0x64,
+ CQE_RX_ERR_TCP_FLAG = 0x65,
+ CQE_RX_ERR_TCP_OFFSET = 0x66,
+ CQE_RX_ERR_L4_PCLP = 0x67,
+ CQE_RX_ERR_RBDR_TRUNC = 0x70,
+};
+
+struct cqe_rx_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 stdn_fault:1;
+ u64 rsvd0:1;
+ u64 rq_qs:7;
+ u64 rq_idx:3;
+ u64 rsvd1:12;
+ u64 rss_alg:4;
+ u64 rsvd2:4;
+ u64 rb_cnt:4;
+ u64 vlan_found:1;
+ u64 vlan_stripped:1;
+ u64 vlan2_found:1;
+ u64 vlan2_stripped:1;
+ u64 l4_type:4;
+ u64 l3_type:4;
+ u64 l2_present:1;
+ u64 err_level:3;
+ u64 err_opcode:8;
+
+ u64 pkt_len:16; /* W1 */
+ u64 l2_ptr:8;
+ u64 l3_ptr:8;
+ u64 l4_ptr:8;
+ u64 cq_pkt_len:8;
+ u64 align_pad:3;
+ u64 rsvd3:1;
+ u64 chan:12;
+
+ u64 rss_tag:32; /* W2 */
+ u64 vlan_tci:16;
+ u64 vlan_ptr:8;
+ u64 vlan2_ptr:8;
+
+ u64 rb3_sz:16; /* W3 */
+ u64 rb2_sz:16;
+ u64 rb1_sz:16;
+ u64 rb0_sz:16;
+
+ u64 rb7_sz:16; /* W4 */
+ u64 rb6_sz:16;
+ u64 rb5_sz:16;
+ u64 rb4_sz:16;
+
+ u64 rb11_sz:16; /* W5 */
+ u64 rb10_sz:16;
+ u64 rb9_sz:16;
+ u64 rb8_sz:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 err_opcode:8;
+ u64 err_level:3;
+ u64 l2_present:1;
+ u64 l3_type:4;
+ u64 l4_type:4;
+ u64 vlan2_stripped:1;
+ u64 vlan2_found:1;
+ u64 vlan_stripped:1;
+ u64 vlan_found:1;
+ u64 rb_cnt:4;
+ u64 rsvd2:4;
+ u64 rss_alg:4;
+ u64 rsvd1:12;
+ u64 rq_idx:3;
+ u64 rq_qs:7;
+ u64 rsvd0:1;
+ u64 stdn_fault:1;
+ u64 cqe_type:4; /* W0 */
+ u64 chan:12;
+ u64 rsvd3:1;
+ u64 align_pad:3;
+ u64 cq_pkt_len:8;
+ u64 l4_ptr:8;
+ u64 l3_ptr:8;
+ u64 l2_ptr:8;
+ u64 pkt_len:16; /* W1 */
+ u64 vlan2_ptr:8;
+ u64 vlan_ptr:8;
+ u64 vlan_tci:16;
+ u64 rss_tag:32; /* W2 */
+ u64 rb0_sz:16;
+ u64 rb1_sz:16;
+ u64 rb2_sz:16;
+ u64 rb3_sz:16; /* W3 */
+ u64 rb4_sz:16;
+ u64 rb5_sz:16;
+ u64 rb6_sz:16;
+ u64 rb7_sz:16; /* W4 */
+ u64 rb8_sz:16;
+ u64 rb9_sz:16;
+ u64 rb10_sz:16;
+ u64 rb11_sz:16; /* W5 */
+#endif
+ u64 rb0_ptr:64;
+ u64 rb1_ptr:64;
+ u64 rb2_ptr:64;
+ u64 rb3_ptr:64;
+ u64 rb4_ptr:64;
+ u64 rb5_ptr:64;
+ u64 rb6_ptr:64;
+ u64 rb7_ptr:64;
+ u64 rb8_ptr:64;
+ u64 rb9_ptr:64;
+ u64 rb10_ptr:64;
+ u64 rb11_ptr:64;
+};
+
+struct cqe_rx_tcp_err_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:60;
+
+ u64 rsvd1:4; /* W1 */
+ u64 partial_first:1;
+ u64 rsvd2:27;
+ u64 rbdr_bytes:8;
+ u64 rsvd3:24;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 rsvd0:60;
+ u64 cqe_type:4;
+
+ u64 rsvd3:24;
+ u64 rbdr_bytes:8;
+ u64 rsvd2:27;
+ u64 partial_first:1;
+ u64 rsvd1:4;
+#endif
+};
+
+struct cqe_rx_tcp_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:52;
+ u64 cq_tcp_status:8;
+
+ u64 rsvd1:32; /* W1 */
+ u64 tcp_cntx_bytes:8;
+ u64 rsvd2:8;
+ u64 tcp_err_bytes:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 cq_tcp_status:8;
+ u64 rsvd0:52;
+ u64 cqe_type:4; /* W0 */
+
+ u64 tcp_err_bytes:16;
+ u64 rsvd2:8;
+ u64 tcp_cntx_bytes:8;
+ u64 rsvd1:32; /* W1 */
+#endif
+};
+
+struct cqe_send_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 cqe_type:4; /* W0 */
+ u64 rsvd0:4;
+ u64 sqe_ptr:16;
+ u64 rsvd1:4;
+ u64 rsvd2:10;
+ u64 sq_qs:7;
+ u64 sq_idx:3;
+ u64 rsvd3:8;
+ u64 send_status:8;
+
+ u64 ptp_timestamp:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 send_status:8;
+ u64 rsvd3:8;
+ u64 sq_idx:3;
+ u64 sq_qs:7;
+ u64 rsvd2:10;
+ u64 rsvd1:4;
+ u64 sqe_ptr:16;
+ u64 rsvd0:4;
+ u64 cqe_type:4; /* W0 */
+
+ u64 ptp_timestamp:64; /* W1 */
+#endif
+};
+
+union cq_desc_t {
+ u64 u[64];
+ struct cqe_send_t snd_hdr;
+ struct cqe_rx_t rx_hdr;
+ struct cqe_rx_tcp_t rx_tcp_hdr;
+ struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
+};
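+/* A CQE is dispatched on its common cqe_type field, e.g. (a sketch):
+ *	union cq_desc_t *desc = GET_CQ_DESC(cq, head);
+ *	if (desc->snd_hdr.cqe_type == CQE_TYPE_SEND)
+ *		handle the TX completion;
+ */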
+
+struct rbdr_entry_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd0:15;
+ u64 buf_addr:42;
+ u64 cache_align:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 cache_align:7;
+ u64 buf_addr:42;
+ u64 rsvd0:15;
+#endif
+};
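+/* buf_addr holds bits <48:7> of the buffer's DMA address; the low
+ * seven bits are implied zero, so receive buffers must be 128-byte
+ * (NICVF_RCV_BUF_ALIGN_BYTES) aligned.
+ */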
+
+/* TCP reassembly context */
+struct rbe_tcp_cnxt_t {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 tcp_pkt_cnt:12;
+ u64 rsvd1:4;
+ u64 align_hdr_bytes:4;
+ u64 align_ptr_bytes:4;
+ u64 ptr_bytes:16;
+ u64 rsvd2:24;
+ u64 cqe_type:4;
+ u64 rsvd0:54;
+ u64 tcp_end_reason:2;
+ u64 tcp_status:4;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tcp_status:4;
+ u64 tcp_end_reason:2;
+ u64 rsvd0:54;
+ u64 cqe_type:4;
+ u64 rsvd2:24;
+ u64 ptr_bytes:16;
+ u64 align_ptr_bytes:4;
+ u64 align_hdr_bytes:4;
+ u64 rsvd1:4;
+ u64 tcp_pkt_cnt:12;
+#endif
+};
+
+/* Always Big endian */
+struct rx_hdr_t {
+ u64 opaque:32;
+ u64 rss_flow:8;
+ u64 skip_length:6;
+ u64 disable_rss:1;
+ u64 disable_tcp_reassembly:1;
+ u64 nodrop:1;
+ u64 dest_alg:2;
+ u64 rsvd0:2;
+ u64 dest_rq:11;
+};
+
+enum send_l4_csum_type {
+ SEND_L4_CSUM_DISABLE = 0x00,
+ SEND_L4_CSUM_UDP = 0x01,
+ SEND_L4_CSUM_TCP = 0x02,
+ SEND_L4_CSUM_SCTP = 0x03,
+};
+
+enum send_crc_alg {
+ SEND_CRCALG_CRC32 = 0x00,
+ SEND_CRCALG_CRC32C = 0x01,
+ SEND_CRCALG_ICRC = 0x02,
+};
+
+enum send_load_type {
+ SEND_LD_TYPE_LDD = 0x00,
+ SEND_LD_TYPE_LDT = 0x01,
+ SEND_LD_TYPE_LDWB = 0x02,
+};
+
+enum send_mem_alg_type {
+ SEND_MEMALG_SET = 0x00,
+ SEND_MEMALG_ADD = 0x08,
+ SEND_MEMALG_SUB = 0x09,
+ SEND_MEMALG_ADDLEN = 0x0A,
+ SEND_MEMALG_SUBLEN = 0x0B,
+};
+
+enum send_mem_dsz_type {
+ SEND_MEMDSZ_B64 = 0x00,
+ SEND_MEMDSZ_B32 = 0x01,
+ SEND_MEMDSZ_B8 = 0x03,
+};
+
+enum sq_subdesc_type {
+ SQ_DESC_TYPE_INVALID = 0x00,
+ SQ_DESC_TYPE_HEADER = 0x01,
+ SQ_DESC_TYPE_CRC = 0x02,
+ SQ_DESC_TYPE_IMMEDIATE = 0x03,
+ SQ_DESC_TYPE_GATHER = 0x04,
+ SQ_DESC_TYPE_MEMORY = 0x05,
+};
+
+struct sq_crc_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 rsvd1:32;
+ u64 crc_ival:32;
+ u64 subdesc_type:4;
+ u64 crc_alg:2;
+ u64 rsvd0:10;
+ u64 crc_insert_pos:16;
+ u64 hdr_start:16;
+ u64 crc_len:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 crc_len:16;
+ u64 hdr_start:16;
+ u64 crc_insert_pos:16;
+ u64 rsvd0:10;
+ u64 crc_alg:2;
+ u64 subdesc_type:4;
+ u64 crc_ival:32;
+ u64 rsvd1:32;
+#endif
+};
+
+struct sq_gather_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 ld_type:2;
+ u64 rsvd0:42;
+ u64 size:16;
+
+ u64 rsvd1:15; /* W1 */
+ u64 addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 size:16;
+ u64 rsvd0:42;
+ u64 ld_type:2;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 addr:49;
+ u64 rsvd1:15; /* W1 */
+#endif
+};
+
+/* SQ immediate subdescriptor */
+struct sq_imm_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 rsvd0:46;
+ u64 len:14;
+
+ u64 data:64; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 len:14;
+ u64 rsvd0:46;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 data:64; /* W1 */
+#endif
+};
+
+struct sq_mem_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4; /* W0 */
+ u64 mem_alg:4;
+ u64 mem_dsz:2;
+ u64 wmem:1;
+ u64 rsvd0:21;
+ u64 offset:32;
+
+ u64 rsvd1:15; /* W1 */
+ u64 addr:49;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 offset:32;
+ u64 rsvd0:21;
+ u64 wmem:1;
+ u64 mem_dsz:2;
+ u64 mem_alg:4;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 addr:49;
+ u64 rsvd1:15; /* W1 */
+#endif
+};
+
+struct sq_hdr_subdesc {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 subdesc_type:4;
+ u64 tso:1;
+ u64 post_cqe:1; /* Post CQE on no error also */
+ u64 dont_send:1;
+ u64 tstmp:1;
+ u64 subdesc_cnt:8;
+ u64 csum_l4:2;
+ u64 csum_l3:1;
+ u64 rsvd0:5;
+ u64 l4_offset:8;
+ u64 l3_offset:8;
+ u64 rsvd1:4;
+ u64 tot_len:20; /* W0 */
+
+ u64 tso_sdc_cont:8;
+ u64 tso_sdc_first:8;
+ u64 tso_l4_offset:8;
+ u64 tso_flags_last:12;
+ u64 tso_flags_first:12;
+ u64 rsvd2:2;
+ u64 tso_max_paysize:14; /* W1 */
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tot_len:20;
+ u64 rsvd1:4;
+ u64 l3_offset:8;
+ u64 l4_offset:8;
+ u64 rsvd0:5;
+ u64 csum_l3:1;
+ u64 csum_l4:2;
+ u64 subdesc_cnt:8;
+ u64 tstmp:1;
+ u64 dont_send:1;
+ u64 post_cqe:1; /* Post CQE on no error also */
+ u64 tso:1;
+ u64 subdesc_type:4; /* W0 */
+
+ u64 tso_max_paysize:14;
+ u64 rsvd2:2;
+ u64 tso_flags_first:12;
+ u64 tso_flags_last:12;
+ u64 tso_l4_offset:8;
+ u64 tso_sdc_first:8;
+ u64 tso_sdc_cont:8; /* W1 */
+#endif
+};
+
+/* Queue config register formats */
+struct rq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_2_63:62;
+ u64 ena:1;
+ u64 tcp_ena:1;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tcp_ena:1;
+ u64 ena:1;
+ u64 reserved_2_63:62;
+#endif
+};
+
+struct cq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_43_63:21;
+ u64 ena:1;
+ u64 reset:1;
+ u64 caching:1;
+ u64 reserved_35_39:5;
+ u64 qsize:3;
+ u64 reserved_25_31:7;
+ u64 avg_con:9;
+ u64 reserved_0_15:16;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 reserved_0_15:16;
+ u64 avg_con:9;
+ u64 reserved_25_31:7;
+ u64 qsize:3;
+ u64 reserved_35_39:5;
+ u64 caching:1;
+ u64 reset:1;
+ u64 ena:1;
+ u64 reserved_43_63:21;
+#endif
+};
+
+struct sq_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_20_63:44;
+ u64 ena:1;
+ u64 reserved_18_18:1;
+ u64 reset:1;
+ u64 ldwb:1;
+ u64 reserved_11_15:5;
+ u64 qsize:3;
+ u64 reserved_3_7:5;
+ u64 tstmp_bgx_intf:3;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 tstmp_bgx_intf:3;
+ u64 reserved_3_7:5;
+ u64 qsize:3;
+ u64 reserved_11_15:5;
+ u64 ldwb:1;
+ u64 reset:1;
+ u64 reserved_18_18:1;
+ u64 ena:1;
+ u64 reserved_20_63:44;
+#endif
+};
+
+struct rbdr_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_45_63:19;
+ u64 ena:1;
+ u64 reset:1;
+ u64 ldwb:1;
+ u64 reserved_36_41:6;
+ u64 qsize:4;
+ u64 reserved_25_31:7;
+ u64 avg_con:9;
+ u64 reserved_12_15:4;
+ u64 lines:12;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 lines:12;
+ u64 reserved_12_15:4;
+ u64 avg_con:9;
+ u64 reserved_25_31:7;
+ u64 qsize:4;
+ u64 reserved_36_41:6;
+ u64 ldwb:1;
+ u64 reset:1;
+ u64 ena:1;
+ u64 reserved_45_63:19;
+#endif
+};
+
+struct qs_cfg {
+#if defined(__BIG_ENDIAN_BITFIELD)
+ u64 reserved_32_63:32;
+ u64 ena:1;
+ u64 reserved_27_30:4;
+ u64 sq_ins_ena:1;
+ u64 sq_ins_pos:6;
+ u64 lock_ena:1;
+ u64 lock_viol_cqe_ena:1;
+ u64 send_tstmp_ena:1;
+ u64 be:1;
+ u64 reserved_7_15:9;
+ u64 vnic:7;
+#elif defined(__LITTLE_ENDIAN_BITFIELD)
+ u64 vnic:7;
+ u64 reserved_7_15:9;
+ u64 be:1;
+ u64 send_tstmp_ena:1;
+ u64 lock_viol_cqe_ena:1;
+ u64 lock_ena:1;
+ u64 sq_ins_pos:6;
+ u64 sq_ins_ena:1;
+ u64 reserved_27_30:4;
+ u64 ena:1;
+ u64 reserved_32_63:32;
+#endif
+};
+
+#endif /* Q_STRUCT_H */
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
new file mode 100644
index 0000000..633ec05
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -0,0 +1,966 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+
+#include "nic_reg.h"
+#include "nic.h"
+#include "thunder_bgx.h"
+
+#define DRV_NAME "thunder-BGX"
+#define DRV_VERSION "1.0"
+
+struct lmac {
+ struct bgx *bgx;
+ int dmac;
+ unsigned char mac[ETH_ALEN];
+ bool link_up;
+ int lmacid; /* ID within BGX */
+ int lmacid_bd; /* ID on board */
+ struct net_device netdev;
+ struct phy_device *phydev;
+ unsigned int last_duplex;
+ unsigned int last_link;
+ unsigned int last_speed;
+ bool is_sgmii;
+ struct delayed_work dwork;
+ struct workqueue_struct *check_link;
+};
+
+struct bgx {
+ u8 bgx_id;
+ u8 qlm_mode;
+ struct lmac lmac[MAX_LMAC_PER_BGX];
+ int lmac_count;
+ int lmac_type;
+ int lane_to_sds;
+ int use_training;
+ void __iomem *reg_base;
+ struct pci_dev *pdev;
+};
+
+static struct bgx *bgx_vnic[MAX_BGX_THUNDER];
+static int lmac_count; /* Total number of LMACs in the system */
+
+static int bgx_xaui_check_link(struct lmac *lmac);
+
+/* Supported devices */
+static const struct pci_device_id bgx_id_table[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_BGX) },
+ { 0, } /* end of table */
+};
+
+MODULE_AUTHOR("Cavium Inc");
+MODULE_DESCRIPTION("Cavium Thunder BGX/MAC Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, bgx_id_table);
+
+/* The Cavium ThunderX network controller can *only* be found in SoCs
+ * containing the ThunderX ARM64 CPU implementation. All accesses to the device
+ * registers on this platform are implicitly strongly ordered with respect
+ * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
+ * with no memory barriers in this driver. The readq()/writeq() functions add
+ * explicit ordering operations which in this case are redundant, and only
+ * add overhead.
+ */
+
+/* Register read/write APIs */
+static u64 bgx_reg_read(struct bgx *bgx, u8 lmac, u64 offset)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ return readq_relaxed(addr);
+}
+
+static void bgx_reg_write(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ writeq_relaxed(val, addr);
+}
+
+static void bgx_reg_modify(struct bgx *bgx, u8 lmac, u64 offset, u64 val)
+{
+ void __iomem *addr = bgx->reg_base + ((u32)lmac << 20) + offset;
+
+ writeq_relaxed(val | readq_relaxed(addr), addr);
+}
+
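+/* Poll a register until the bits in @mask clear (@zero true) or get
+ * set (@zero false); returns 0 on success, 1 on timeout after
+ * roughly 100-200 msecs.
+ */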
+static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
+{
+ int timeout = 100;
+ u64 reg_val;
+
+ while (timeout) {
+ reg_val = bgx_reg_read(bgx, lmac, reg);
+ if (zero && !(reg_val & mask))
+ return 0;
+ if (!zero && (reg_val & mask))
+ return 0;
+ usleep_range(1000, 2000);
+ timeout--;
+ }
+ return 1;
+}
+
+/* Return a bitmap of BGX blocks present in HW */
+unsigned bgx_get_map(int node)
+{
+ int i;
+ unsigned map = 0;
+
+ for (i = 0; i < MAX_BGX_PER_CN88XX; i++) {
+ if (bgx_vnic[(node * MAX_BGX_PER_CN88XX) + i])
+ map |= (1 << i);
+ }
+
+ return map;
+}
+EXPORT_SYMBOL(bgx_get_map);
+
+/* Return number of LMACs configured for this BGX */
+int bgx_get_lmac_count(int node, int bgx_idx)
+{
+ struct bgx *bgx;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (bgx)
+ return bgx->lmac_count;
+
+ return 0;
+}
+EXPORT_SYMBOL(bgx_get_lmac_count);
+
+/* Returns the current link status of LMAC */
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
+{
+ struct bgx_link_status *link = (struct bgx_link_status *)status;
+ struct bgx *bgx;
+ struct lmac *lmac;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return;
+
+ lmac = &bgx->lmac[lmacid];
+ link->link_up = lmac->link_up;
+ link->duplex = lmac->last_duplex;
+ link->speed = lmac->last_speed;
+}
+EXPORT_SYMBOL(bgx_get_lmac_link_state);
+
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+
+ if (bgx)
+ return bgx->lmac[lmacid].mac;
+
+ return NULL;
+}
+EXPORT_SYMBOL(bgx_get_lmac_mac);
+
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
+{
+ struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+
+ if (!bgx)
+ return;
+
+ ether_addr_copy(bgx->lmac[lmacid].mac, mac);
+}
+EXPORT_SYMBOL(bgx_set_lmac_mac);
+
+static void bgx_sgmii_change_link_state(struct lmac *lmac)
+{
+ struct bgx *bgx = lmac->bgx;
+ u64 cmr_cfg;
+ u64 port_cfg = 0;
+ u64 misc_ctl = 0;
+
+ cmr_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_CMRX_CFG);
+ cmr_cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+
+ port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
+ misc_ctl = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL);
+
+ if (lmac->link_up) {
+ misc_ctl &= ~PCS_MISC_CTL_GMX_ENO;
+ port_cfg &= ~GMI_PORT_CFG_DUPLEX;
+ port_cfg |= (lmac->last_duplex << 2);
+ } else {
+ misc_ctl |= PCS_MISC_CTL_GMX_ENO;
+ }
+
+ switch (lmac->last_speed) {
+ case 10:
+ port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+ port_cfg |= GMI_PORT_CFG_SPEED_MSB; /* speed_msb 1 */
+ port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 50; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+ break;
+ case 100:
+ port_cfg &= ~GMI_PORT_CFG_SPEED; /* speed 0 */
+ port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+ port_cfg &= ~GMI_PORT_CFG_SLOT_TIME; /* slottime 0 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 5; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 64);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_BURST, 0);
+ break;
+ case 1000:
+ port_cfg |= GMI_PORT_CFG_SPEED; /* speed 1 */
+ port_cfg &= ~GMI_PORT_CFG_SPEED_MSB; /* speed_msb 0 */
+ port_cfg |= GMI_PORT_CFG_SLOT_TIME; /* slottime 1 */
+ misc_ctl &= ~PCS_MISC_CTL_SAMP_PT_MASK;
+ misc_ctl |= 1; /* samp_pt */
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_TXX_SLOT, 512);
+ if (lmac->last_duplex)
+ bgx_reg_write(bgx, lmac->lmacid,
+ BGX_GMP_GMI_TXX_BURST, 0);
+ else
+ bgx_reg_write(bgx, lmac->lmacid,
+ BGX_GMP_GMI_TXX_BURST, 8192);
+ break;
+ default:
+ break;
+ }
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_PCS_MISCX_CTL, misc_ctl);
+ bgx_reg_write(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG, port_cfg);
+
+ port_cfg = bgx_reg_read(bgx, lmac->lmacid, BGX_GMP_GMI_PRTX_CFG);
+
+ /* Re-enable LMAC */
+ cmr_cfg |= CMR_EN;
+ bgx_reg_write(bgx, lmac->lmacid, BGX_CMRX_CFG, cmr_cfg);
+}
+
+static void bgx_lmac_handler(struct net_device *netdev)
+{
+ struct lmac *lmac = container_of(netdev, struct lmac, netdev);
+ struct phy_device *phydev = lmac->phydev;
+ int link_changed = 0;
+
+ if (!phydev->link && lmac->last_link)
+ link_changed = -1;
+
+ if (phydev->link &&
+ (lmac->last_duplex != phydev->duplex ||
+ lmac->last_link != phydev->link ||
+ lmac->last_speed != phydev->speed)) {
+ link_changed = 1;
+ }
+
+ lmac->last_link = phydev->link;
+ lmac->last_speed = phydev->speed;
+ lmac->last_duplex = phydev->duplex;
+
+ if (!link_changed)
+ return;
+
+ if (link_changed > 0)
+ lmac->link_up = true;
+ else
+ lmac->link_up = false;
+
+ if (lmac->is_sgmii)
+ bgx_sgmii_change_link_state(lmac);
+ else
+ bgx_xaui_check_link(lmac);
+}
+
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+ struct bgx *bgx;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return 0;
+
+ if (idx > 8)
+ lmac = 0;
+ return bgx_reg_read(bgx, lmac, BGX_CMRX_RX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_rx_stats);
+
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
+{
+ struct bgx *bgx;
+
+ bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+ if (!bgx)
+ return 0;
+
+ return bgx_reg_read(bgx, lmac, BGX_CMRX_TX_STAT0 + (idx * 8));
+}
+EXPORT_SYMBOL(bgx_get_tx_stats);
+
+static void bgx_flush_dmac_addrs(struct bgx *bgx, int lmac)
+{
+ u64 offset;
+
+ while (bgx->lmac[lmac].dmac > 0) {
+ offset = ((bgx->lmac[lmac].dmac - 1) * sizeof(u64)) +
+ (lmac * MAX_DMAC_PER_LMAC * sizeof(u64));
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + offset, 0);
+ bgx->lmac[lmac].dmac--;
+ }
+}
+
+static int bgx_lmac_sgmii_init(struct bgx *bgx, int lmacid)
+{
+ u64 cfg;
+
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_THRESH, 0x30);
+ /* max packet size */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_RXX_JABBER, MAX_FRAME_SIZE);
+
+ /* Disable frame alignment if using preamble */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+ if (cfg & 1)
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_SGMII_CTL, 0);
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+ /* PCS reset */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, PCS_MRX_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_CTL,
+ PCS_MRX_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX PCS reset not completed\n");
+ return -1;
+ }
+
+ /* Clear power down; reset and enable autoneg */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_PCS_MRX_CTL);
+ cfg &= ~PCS_MRX_CTL_PWR_DN;
+ cfg |= (PCS_MRX_CTL_RST_AN | PCS_MRX_CTL_AN_EN);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_PCS_MRX_CTL, cfg);
+
+ if (bgx_poll_reg(bgx, lmacid, BGX_GMP_PCS_MRX_STATUS,
+ PCS_MRX_STATUS_AN_CPT, false)) {
+ dev_err(&bgx->pdev->dev, "BGX AN_CPT not completed\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+static int bgx_lmac_xaui_init(struct bgx *bgx, int lmacid, int lmac_type)
+{
+ u64 cfg;
+
+ /* Reset SPU */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET);
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+ return -1;
+ }
+
+ /* Disable LMAC */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cfg &= ~CMR_EN;
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
+
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_LOW_POWER);
+ /* Set interleaved running disparity for RXAUI */
+ if (bgx->lmac_type != BGX_MODE_RXAUI)
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+ else
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL,
+ SPU_MISC_CTL_RX_DIS | SPU_MISC_CTL_INTLV_RDISP);
+
+ /* clear all interrupts */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_RX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_RX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+
+ if (bgx->use_training) {
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LP_CUP, 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_CUP, 0x00);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_LD_REP, 0x00);
+ /* training enable */
+ bgx_reg_modify(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL, SPU_PMD_CRTL_TRAIN_EN);
+ }
+
+ /* Append FCS to each packet */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, SMU_TX_APPEND_FCS_D);
+
+ /* Disable forward error correction */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_FEC_CONTROL);
+ cfg &= ~SPU_FEC_CTL_FEC_EN;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_FEC_CONTROL, cfg);
+
+ /* Disable autoneg */
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_CONTROL);
+ cfg = cfg & ~(SPU_AN_CTL_AN_EN | SPU_AN_CTL_XNP_EN);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_CONTROL, cfg);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_AN_ADV);
+ if (bgx->lmac_type == BGX_MODE_10G_KR)
+ cfg |= (1 << 23);
+ else if (bgx->lmac_type == BGX_MODE_40G_KR)
+ cfg |= (1 << 24);
+ else
+ cfg &= ~((1 << 23) | (1 << 24));
+ cfg = cfg & (~((1ULL << 25) | (1ULL << 22) | (1ULL << 12)));
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_AN_ADV, cfg);
+
+ cfg = bgx_reg_read(bgx, 0, BGX_SPU_DBG_CONTROL);
+ cfg &= ~SPU_DBG_CTL_AN_ARB_LINK_CHK_EN;
+ bgx_reg_write(bgx, 0, BGX_SPU_DBG_CONTROL, cfg);
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG, CMR_EN);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_CONTROL1);
+ cfg &= ~SPU_CTL_LOW_POWER;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_CONTROL1, cfg);
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_CTL);
+ cfg &= ~SMU_TX_CTL_UNI_EN;
+ cfg |= SMU_TX_CTL_DIC_EN;
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_CTL, cfg);
+
+ /* take lmac_count into account */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_THRESH, (0x100 - 1));
+ /* max packet size */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_RX_JABBER, MAX_FRAME_SIZE);
+
+ return 0;
+}
+
+static int bgx_xaui_check_link(struct lmac *lmac)
+{
+ struct bgx *bgx = lmac->bgx;
+ int lmacid = lmac->lmacid;
+ int lmac_type = bgx->lmac_type;
+ u64 cfg;
+
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_MISC_CONTROL, SPU_MISC_CTL_RX_DIS);
+ if (bgx->use_training) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ if (!(cfg & (1ull << 13))) {
+ cfg = (1ull << 13) | (1ull << 14);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL);
+ cfg |= (1ull << 0);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_BR_PMD_CRTL, cfg);
+ return -1;
+ }
+ }
+
+ /* wait for PCS to come out of reset */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_CONTROL1, SPU_CTL_RESET, true)) {
+ dev_err(&bgx->pdev->dev, "BGX SPU reset not completed\n");
+ return -1;
+ }
+
+ if ((lmac_type == BGX_MODE_10G_KR) || (lmac_type == BGX_MODE_XFI) ||
+ (lmac_type == BGX_MODE_40G_KR) || (lmac_type == BGX_MODE_XLAUI)) {
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BR_STATUS1,
+ SPU_BR_STATUS_BLK_LOCK, false)) {
+ dev_err(&bgx->pdev->dev,
+ "SPU_BR_STATUS_BLK_LOCK not completed\n");
+ return -1;
+ }
+ } else {
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_BX_STATUS,
+ SPU_BX_STATUS_RX_ALIGN, false)) {
+ dev_err(&bgx->pdev->dev,
+ "SPU_BX_STATUS_RX_ALIGN not completed\n");
+ return -1;
+ }
+ }
+
+ /* Clear rcvflt bit (latching high) and read it back */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS2, SPU_STATUS2_RCVFLT);
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+ dev_err(&bgx->pdev->dev, "Receive fault, retry training\n");
+ if (bgx->use_training) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_INT);
+ if (!(cfg & (1ull << 13))) {
+ cfg = (1ull << 13) | (1ull << 14);
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_INT, cfg);
+ cfg = bgx_reg_read(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL);
+ cfg |= (1ull << 0);
+ bgx_reg_write(bgx, lmacid,
+ BGX_SPUX_BR_PMD_CRTL, cfg);
+ return -1;
+ }
+ }
+ return -1;
+ }
+
+ /* Wait for MAC RX to be ready */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_RX_CTL,
+ SMU_RX_CTL_STATUS, true)) {
+ dev_err(&bgx->pdev->dev, "SMU RX link not okay\n");
+ return -1;
+ }
+
+ /* Wait for BGX RX to be idle */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_RX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "SMU RX not idle\n");
+ return -1;
+ }
+
+ /* Wait for BGX TX to be idle */
+ if (bgx_poll_reg(bgx, lmacid, BGX_SMUX_CTL, SMU_CTL_TX_IDLE, false)) {
+ dev_err(&bgx->pdev->dev, "SMU TX not idle\n");
+ return -1;
+ }
+
+ if (bgx_reg_read(bgx, lmacid, BGX_SPUX_STATUS2) & SPU_STATUS2_RCVFLT) {
+ dev_err(&bgx->pdev->dev, "Receive fault\n");
+ return -1;
+ }
+
+ /* Receive link is latching low. Force it high and verify it */
+ bgx_reg_modify(bgx, lmacid, BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+ if (bgx_poll_reg(bgx, lmacid, BGX_SPUX_STATUS1,
+ SPU_STATUS1_RCV_LNK, false)) {
+ dev_err(&bgx->pdev->dev, "SPU receive link down\n");
+ return -1;
+ }
+
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SPUX_MISC_CONTROL);
+ cfg &= ~SPU_MISC_CTL_RX_DIS;
+ bgx_reg_write(bgx, lmacid, BGX_SPUX_MISC_CONTROL, cfg);
+ return 0;
+}
+
+static void bgx_poll_for_link(struct work_struct *work)
+{
+ struct lmac *lmac;
+ u64 link;
+
+ lmac = container_of(work, struct lmac, dwork.work);
+
+ /* Receive link is latching low. Force it high and verify it */
+ bgx_reg_modify(lmac->bgx, lmac->lmacid,
+ BGX_SPUX_STATUS1, SPU_STATUS1_RCV_LNK);
+ bgx_poll_reg(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1,
+ SPU_STATUS1_RCV_LNK, false);
+
+ link = bgx_reg_read(lmac->bgx, lmac->lmacid, BGX_SPUX_STATUS1);
+ if (link & SPU_STATUS1_RCV_LNK) {
+ lmac->link_up = 1;
+ if (lmac->bgx->lmac_type == BGX_MODE_XLAUI)
+ lmac->last_speed = 40000;
+ else
+ lmac->last_speed = 10000;
+ lmac->last_duplex = 1;
+ } else {
+ lmac->link_up = 0;
+ }
+
+ if (lmac->last_link != lmac->link_up) {
+ lmac->last_link = lmac->link_up;
+ if (lmac->link_up)
+ bgx_xaui_check_link(lmac);
+ }
+
+ queue_delayed_work(lmac->check_link, &lmac->dwork, HZ * 2);
+}
+
+static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid)
+{
+ struct lmac *lmac;
+ u64 cfg;
+
+ lmac = &bgx->lmac[lmacid];
+ lmac->bgx = bgx;
+
+ if (bgx->lmac_type == BGX_MODE_SGMII) {
+ lmac->is_sgmii = 1;
+ if (bgx_lmac_sgmii_init(bgx, lmacid))
+ return -1;
+ } else {
+ lmac->is_sgmii = 0;
+ if (bgx_lmac_xaui_init(bgx, lmacid, bgx->lmac_type))
+ return -1;
+ }
+
+ if (lmac->is_sgmii) {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND);
+ cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_APPEND, cfg);
+ bgx_reg_write(bgx, lmacid, BGX_GMP_GMI_TXX_MIN_PKT, 60 - 1);
+ } else {
+ cfg = bgx_reg_read(bgx, lmacid, BGX_SMUX_TX_APPEND);
+ cfg |= ((1ull << 2) | (1ull << 1)); /* FCS and PAD */
+ bgx_reg_modify(bgx, lmacid, BGX_SMUX_TX_APPEND, cfg);
+ bgx_reg_write(bgx, lmacid, BGX_SMUX_TX_MIN_PKT, 60 + 4);
+ }
+
+ /* Enable lmac */
+ bgx_reg_modify(bgx, lmacid, BGX_CMRX_CFG,
+ CMR_EN | CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+
+ /* Restore default cfg, in case low level firmware changed it */
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_RX_DMAC_CTL, 0x03);
+
+ if ((bgx->lmac_type != BGX_MODE_XFI) &&
+ (bgx->lmac_type != BGX_MODE_XLAUI) &&
+ (bgx->lmac_type != BGX_MODE_40G_KR) &&
+ (bgx->lmac_type != BGX_MODE_10G_KR)) {
+ if (!lmac->phydev)
+ return -ENODEV;
+
+ lmac->phydev->dev_flags = 0;
+
+ if (phy_connect_direct(&lmac->netdev, lmac->phydev,
+ bgx_lmac_handler,
+ PHY_INTERFACE_MODE_SGMII))
+ return -ENODEV;
+
+ phy_start_aneg(lmac->phydev);
+ } else {
+ lmac->check_link = alloc_workqueue("check_link", WQ_UNBOUND |
+ WQ_MEM_RECLAIM, 1);
+ if (!lmac->check_link)
+ return -ENOMEM;
+ INIT_DELAYED_WORK(&lmac->dwork, bgx_poll_for_link);
+ queue_delayed_work(lmac->check_link, &lmac->dwork, 0);
+ }
+
+ return 0;
+}
+
+static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
+{
+ struct lmac *lmac;
+ u64 cmrx_cfg;
+
+ lmac = &bgx->lmac[lmacid];
+ if (lmac->check_link) {
+ /* Destroy work queue */
+ cancel_delayed_work(&lmac->dwork);
+ flush_workqueue(lmac->check_link);
+ destroy_workqueue(lmac->check_link);
+ }
+
+ cmrx_cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
+ cmrx_cfg &= ~(1 << 15);
+ bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
+ bgx_flush_dmac_addrs(bgx, lmacid);
+
+ if (lmac->phydev)
+ phy_disconnect(lmac->phydev);
+
+ lmac->phydev = NULL;
+}
+
+static void bgx_set_num_ports(struct bgx *bgx)
+{
+ u64 lmac_count;
+
+ switch (bgx->qlm_mode) {
+ case QLM_MODE_SGMII:
+ bgx->lmac_count = 4;
+ bgx->lmac_type = BGX_MODE_SGMII;
+ bgx->lane_to_sds = 0;
+ break;
+ case QLM_MODE_XAUI_1X4:
+ bgx->lmac_count = 1;
+ bgx->lmac_type = BGX_MODE_XAUI;
+ bgx->lane_to_sds = 0xE4;
+ break;
+ case QLM_MODE_RXAUI_2X2:
+ bgx->lmac_count = 2;
+ bgx->lmac_type = BGX_MODE_RXAUI;
+ bgx->lane_to_sds = 0xE4;
+ break;
+ case QLM_MODE_XFI_4X1:
+ bgx->lmac_count = 4;
+ bgx->lmac_type = BGX_MODE_XFI;
+ bgx->lane_to_sds = 0;
+ break;
+ case QLM_MODE_XLAUI_1X4:
+ bgx->lmac_count = 1;
+ bgx->lmac_type = BGX_MODE_XLAUI;
+ bgx->lane_to_sds = 0xE4;
+ break;
+ case QLM_MODE_10G_KR_4X1:
+ bgx->lmac_count = 4;
+ bgx->lmac_type = BGX_MODE_10G_KR;
+ bgx->lane_to_sds = 0;
+ bgx->use_training = 1;
+ break;
+ case QLM_MODE_40G_KR4_1X4:
+ bgx->lmac_count = 1;
+ bgx->lmac_type = BGX_MODE_40G_KR;
+ bgx->lane_to_sds = 0xE4;
+ bgx->use_training = 1;
+ break;
+ default:
+ bgx->lmac_count = 0;
+ break;
+ }
+
+ /* Check if low level firmware has programmed LMAC count
+ * based on board type; if so, use that value, otherwise
+ * keep the default static values
+ */
+ lmac_count = bgx_reg_read(bgx, 0, BGX_CMR_RX_LMACS) & 0x7;
+ if (lmac_count != 4)
+ bgx->lmac_count = lmac_count;
+}
+
+static void bgx_init_hw(struct bgx *bgx)
+{
+ int i;
+
+ bgx_set_num_ports(bgx);
+
+ bgx_reg_modify(bgx, 0, BGX_CMR_GLOBAL_CFG, CMR_GLOBAL_CFG_FCS_STRIP);
+ if (bgx_reg_read(bgx, 0, BGX_CMR_BIST_STATUS))
+ dev_err(&bgx->pdev->dev, "BGX%d BIST failed\n", bgx->bgx_id);
+
+ /* Set lmac type and lane2serdes mapping */
+ for (i = 0; i < bgx->lmac_count; i++) {
+ if (bgx->lmac_type == BGX_MODE_RXAUI) {
+ if (i)
+ bgx->lane_to_sds = 0x0e;
+ else
+ bgx->lane_to_sds = 0x04;
+ bgx_reg_write(bgx, i, BGX_CMRX_CFG,
+ (bgx->lmac_type << 8) | bgx->lane_to_sds);
+ continue;
+ }
+ bgx_reg_write(bgx, i, BGX_CMRX_CFG,
+ (bgx->lmac_type << 8) | (bgx->lane_to_sds + i));
+ bgx->lmac[i].lmacid_bd = lmac_count;
+ lmac_count++;
+ }
+
+ bgx_reg_write(bgx, 0, BGX_CMR_TX_LMACS, bgx->lmac_count);
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_LMACS, bgx->lmac_count);
+
+ /* Set the backpressure AND mask */
+ for (i = 0; i < bgx->lmac_count; i++)
+ bgx_reg_modify(bgx, 0, BGX_CMR_CHAN_MSK_AND,
+ ((1ULL << MAX_BGX_CHANS_PER_LMAC) - 1) <<
+ (i * MAX_BGX_CHANS_PER_LMAC));
+
+ /* Disable all MAC filtering */
+ for (i = 0; i < RX_DMAC_COUNT; i++)
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_DMACX_CAM + (i * 8), 0x00);
+
+ /* Disable MAC steering (NCSI traffic) */
+ for (i = 0; i < RX_TRAFFIC_STEER_RULE_COUNT; i++)
+ bgx_reg_write(bgx, 0, BGX_CMR_RX_STREERING + (i * 8), 0x00);
+}
+
+static void bgx_get_qlm_mode(struct bgx *bgx)
+{
+ struct device *dev = &bgx->pdev->dev;
+ int lmac_type;
+ int train_en;
+
+ /* Read LMAC0 type to figure out QLM mode
+ * This is configured by low level firmware
+ */
+ lmac_type = bgx_reg_read(bgx, 0, BGX_CMRX_CFG);
+ lmac_type = (lmac_type >> 8) & 0x07;
+
+ train_en = bgx_reg_read(bgx, 0, BGX_SPUX_BR_PMD_CRTL) &
+ SPU_PMD_CRTL_TRAIN_EN;
+
+ switch (lmac_type) {
+ case BGX_MODE_SGMII:
+ bgx->qlm_mode = QLM_MODE_SGMII;
+ dev_info(dev, "BGX%d QLM mode: SGMII\n", bgx->bgx_id);
+ break;
+ case BGX_MODE_XAUI:
+ bgx->qlm_mode = QLM_MODE_XAUI_1X4;
+ dev_info(dev, "BGX%d QLM mode: XAUI\n", bgx->bgx_id);
+ break;
+ case BGX_MODE_RXAUI:
+ bgx->qlm_mode = QLM_MODE_RXAUI_2X2;
+ dev_info(dev, "BGX%d QLM mode: RXAUI\n", bgx->bgx_id);
+ break;
+ case BGX_MODE_XFI:
+ if (!train_en) {
+ bgx->qlm_mode = QLM_MODE_XFI_4X1;
+ dev_info(dev, "BGX%d QLM mode: XFI\n", bgx->bgx_id);
+ } else {
+ bgx->qlm_mode = QLM_MODE_10G_KR_4X1;
+ dev_info(dev, "BGX%d QLM mode: 10G_KR\n", bgx->bgx_id);
+ }
+ break;
+ case BGX_MODE_XLAUI:
+ if (!train_en) {
+ bgx->qlm_mode = QLM_MODE_XLAUI_1X4;
+ dev_info(dev, "BGX%d QLM mode: XLAUI\n", bgx->bgx_id);
+ } else {
+ bgx->qlm_mode = QLM_MODE_40G_KR4_1X4;
+ dev_info(dev, "BGX%d QLM mode: 40G_KR4\n", bgx->bgx_id);
+ }
+ break;
+ default:
+ bgx->qlm_mode = QLM_MODE_SGMII;
+ dev_info(dev, "BGX%d QLM default mode: SGMII\n", bgx->bgx_id);
+ }
+}
+
+static void bgx_init_of(struct bgx *bgx, struct device_node *np)
+{
+ struct device_node *np_child;
+ u8 lmac = 0;
+
+ for_each_child_of_node(np, np_child) {
+ struct device_node *phy_np;
+ const char *mac;
+
+ phy_np = of_parse_phandle(np_child, "phy-handle", 0);
+ if (phy_np)
+ bgx->lmac[lmac].phydev = of_phy_find_device(phy_np);
+
+ mac = of_get_mac_address(np_child);
+ if (mac)
+ ether_addr_copy(bgx->lmac[lmac].mac, mac);
+
+ SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev);
+ bgx->lmac[lmac].lmacid = lmac;
+ lmac++;
+ if (lmac == MAX_LMAC_PER_BGX)
+ break;
+ }
+}
+
+static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ int err;
+ struct device *dev = &pdev->dev;
+ struct bgx *bgx = NULL;
+ struct device_node *np;
+ char bgx_sel[5];
+ u8 lmac;
+
+ bgx = devm_kzalloc(dev, sizeof(*bgx), GFP_KERNEL);
+ if (!bgx)
+ return -ENOMEM;
+ bgx->pdev = pdev;
+
+ pci_set_drvdata(pdev, bgx);
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(dev, "Failed to enable PCI device\n");
+ pci_set_drvdata(pdev, NULL);
+ return err;
+ }
+
+ err = pci_request_regions(pdev, DRV_NAME);
+ if (err) {
+ dev_err(dev, "PCI request regions failed 0x%x\n", err);
+ goto err_disable_device;
+ }
+
+ /* MAP configuration registers */
+ bgx->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
+ if (!bgx->reg_base) {
+ dev_err(dev, "BGX: Cannot map CSR memory space, aborting\n");
+ err = -ENOMEM;
+ goto err_release_regions;
+ }
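+
+ /* Bit 24 of the BAR address selects BGX0/BGX1 within a node (an
+ * assumption from the device's address map); the node offset below
+ * makes the index into bgx_vnic[] globally unique.
+ */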
+ bgx->bgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) & 1;
+ bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_CN88XX;
+
+ bgx_vnic[bgx->bgx_id] = bgx;
+ bgx_get_qlm_mode(bgx);
+
+ snprintf(bgx_sel, 5, "bgx%d", bgx->bgx_id);
+ np = of_find_node_by_name(NULL, bgx_sel);
+ if (np)
+ bgx_init_of(bgx, np);
+
+ bgx_init_hw(bgx);
+
+ /* Enable all LMACs */
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ err = bgx_lmac_enable(bgx, lmac);
+ if (err) {
+ dev_err(dev, "BGX%d failed to enable lmac%d\n",
+ bgx->bgx_id, lmac);
+ goto err_enable;
+ }
+ }
+
+ return 0;
+
+err_enable:
+ bgx_vnic[bgx->bgx_id] = NULL;
+err_release_regions:
+ pci_release_regions(pdev);
+err_disable_device:
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+ return err;
+}
+
+static void bgx_remove(struct pci_dev *pdev)
+{
+ struct bgx *bgx = pci_get_drvdata(pdev);
+ u8 lmac;
+
+ /* Disable all LMACs */
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++)
+ bgx_lmac_disable(bgx, lmac);
+
+ bgx_vnic[bgx->bgx_id] = NULL;
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+ pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver bgx_driver = {
+ .name = DRV_NAME,
+ .id_table = bgx_id_table,
+ .probe = bgx_probe,
+ .remove = bgx_remove,
+};
+
+static int __init bgx_init_module(void)
+{
+ pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);
+
+ return pci_register_driver(&bgx_driver);
+}
+
+static void __exit bgx_cleanup_module(void)
+{
+ pci_unregister_driver(&bgx_driver);
+}
+
+module_init(bgx_init_module);
+module_exit(bgx_cleanup_module);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
new file mode 100644
index 0000000..ba4f53b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2015 Cavium, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2 of the GNU General Public License
+ * as published by the Free Software Foundation.
+ */
+
+#ifndef THUNDER_BGX_H
+#define THUNDER_BGX_H
+
+#define MAX_BGX_THUNDER 8 /* Max 4 nodes, 2 per node */
+#define MAX_BGX_PER_CN88XX 2
+#define MAX_LMAC_PER_BGX 4
+#define MAX_BGX_CHANS_PER_LMAC 16
+#define MAX_DMAC_PER_LMAC 8
+#define MAX_FRAME_SIZE 9216
+
+#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
+
+#define MAX_LMAC (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX)
+
+/* Registers */
+#define BGX_CMRX_CFG 0x00
+#define CMR_PKT_TX_EN BIT_ULL(13)
+#define CMR_PKT_RX_EN BIT_ULL(14)
+#define CMR_EN BIT_ULL(15)
+#define BGX_CMR_GLOBAL_CFG 0x08
+#define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6)
+#define BGX_CMRX_RX_ID_MAP 0x60
+#define BGX_CMRX_RX_STAT0 0x70
+#define BGX_CMRX_RX_STAT1 0x78
+#define BGX_CMRX_RX_STAT2 0x80
+#define BGX_CMRX_RX_STAT3 0x88
+#define BGX_CMRX_RX_STAT4 0x90
+#define BGX_CMRX_RX_STAT5 0x98
+#define BGX_CMRX_RX_STAT6 0xA0
+#define BGX_CMRX_RX_STAT7 0xA8
+#define BGX_CMRX_RX_STAT8 0xB0
+#define BGX_CMRX_RX_STAT9 0xB8
+#define BGX_CMRX_RX_STAT10 0xC0
+#define BGX_CMRX_RX_BP_DROP 0xC8
+#define BGX_CMRX_RX_DMAC_CTL 0x0E8
+#define BGX_CMR_RX_DMACX_CAM 0x200
+#define RX_DMACX_CAM_EN BIT_ULL(48)
+#define RX_DMACX_CAM_LMACID(x) (x << 49)
+#define RX_DMAC_COUNT 32
+#define BGX_CMR_RX_STREERING 0x300
+#define RX_TRAFFIC_STEER_RULE_COUNT 8
+#define BGX_CMR_CHAN_MSK_AND 0x450
+#define BGX_CMR_BIST_STATUS 0x460
+#define BGX_CMR_RX_LMACS 0x468
+#define BGX_CMRX_TX_STAT0 0x600
+#define BGX_CMRX_TX_STAT1 0x608
+#define BGX_CMRX_TX_STAT2 0x610
+#define BGX_CMRX_TX_STAT3 0x618
+#define BGX_CMRX_TX_STAT4 0x620
+#define BGX_CMRX_TX_STAT5 0x628
+#define BGX_CMRX_TX_STAT6 0x630
+#define BGX_CMRX_TX_STAT7 0x638
+#define BGX_CMRX_TX_STAT8 0x640
+#define BGX_CMRX_TX_STAT9 0x648
+#define BGX_CMRX_TX_STAT10 0x650
+#define BGX_CMRX_TX_STAT11 0x658
+#define BGX_CMRX_TX_STAT12 0x660
+#define BGX_CMRX_TX_STAT13 0x668
+#define BGX_CMRX_TX_STAT14 0x670
+#define BGX_CMRX_TX_STAT15 0x678
+#define BGX_CMRX_TX_STAT16 0x680
+#define BGX_CMRX_TX_STAT17 0x688
+#define BGX_CMR_TX_LMACS 0x1000
+
+#define BGX_SPUX_CONTROL1 0x10000
+#define SPU_CTL_LOW_POWER BIT_ULL(11)
+#define SPU_CTL_RESET BIT_ULL(15)
+#define BGX_SPUX_STATUS1 0x10008
+#define SPU_STATUS1_RCV_LNK BIT_ULL(2)
+#define BGX_SPUX_STATUS2 0x10020
+#define SPU_STATUS2_RCVFLT BIT_ULL(10)
+#define BGX_SPUX_BX_STATUS 0x10028
+#define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12)
+#define BGX_SPUX_BR_STATUS1 0x10030
+#define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0)
+#define SPU_BR_STATUS_RCV_LNK BIT_ULL(12)
+#define BGX_SPUX_BR_PMD_CRTL 0x10068
+#define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1)
+#define BGX_SPUX_BR_PMD_LP_CUP 0x10078
+#define BGX_SPUX_BR_PMD_LD_CUP 0x10088
+#define BGX_SPUX_BR_PMD_LD_REP 0x10090
+#define BGX_SPUX_FEC_CONTROL 0x100A0
+#define SPU_FEC_CTL_FEC_EN BIT_ULL(0)
+#define SPU_FEC_CTL_ERR_EN BIT_ULL(1)
+#define BGX_SPUX_AN_CONTROL 0x100C8
+#define SPU_AN_CTL_AN_EN BIT_ULL(12)
+#define SPU_AN_CTL_XNP_EN BIT_ULL(13)
+#define BGX_SPUX_AN_ADV 0x100D8
+#define BGX_SPUX_MISC_CONTROL 0x10218
+#define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10)
+#define SPU_MISC_CTL_RX_DIS BIT_ULL(12)
+#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */
+#define BGX_SPUX_INT_W1S 0x10228
+#define BGX_SPUX_INT_ENA_W1C 0x10230
+#define BGX_SPUX_INT_ENA_W1S 0x10238
+#define BGX_SPU_DBG_CONTROL 0x10300
+#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18)
+#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
+
+#define BGX_SMUX_RX_INT 0x20000
+#define BGX_SMUX_RX_JABBER 0x20030
+#define BGX_SMUX_RX_CTL 0x20048
+#define SMU_RX_CTL_STATUS (3ull << 0)
+#define BGX_SMUX_TX_APPEND 0x20100
+#define SMU_TX_APPEND_FCS_D BIT_ULL(2)
+#define BGX_SMUX_TX_MIN_PKT 0x20118
+#define BGX_SMUX_TX_INT 0x20140
+#define BGX_SMUX_TX_CTL 0x20178
+#define SMU_TX_CTL_DIC_EN BIT_ULL(0)
+#define SMU_TX_CTL_UNI_EN BIT_ULL(1)
+#define SMU_TX_CTL_LNK_STATUS (3ull << 4)
+#define BGX_SMUX_TX_THRESH 0x20180
+#define BGX_SMUX_CTL 0x20200
+#define SMU_CTL_RX_IDLE BIT_ULL(0)
+#define SMU_CTL_TX_IDLE BIT_ULL(1)
+
+#define BGX_GMP_PCS_MRX_CTL 0x30000
+#define PCS_MRX_CTL_RST_AN BIT_ULL(9)
+#define PCS_MRX_CTL_PWR_DN BIT_ULL(11)
+#define PCS_MRX_CTL_AN_EN BIT_ULL(12)
+#define PCS_MRX_CTL_RESET BIT_ULL(15)
+#define BGX_GMP_PCS_MRX_STATUS 0x30008
+#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
+#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
+#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
+#define BGX_GMP_PCS_MISCX_CTL 0x30078
+#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
+#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
+#define BGX_GMP_GMI_PRTX_CFG 0x38020
+#define GMI_PORT_CFG_SPEED BIT_ULL(1)
+#define GMI_PORT_CFG_DUPLEX BIT_ULL(2)
+#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3)
+#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
+#define BGX_GMP_GMI_RXX_JABBER 0x38038
+#define BGX_GMP_GMI_TXX_THRESH 0x38210
+#define BGX_GMP_GMI_TXX_APPEND 0x38218
+#define BGX_GMP_GMI_TXX_SLOT 0x38220
+#define BGX_GMP_GMI_TXX_BURST 0x38228
+#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
+#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
+
+#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
+#define BGX_MSIX_VEC_0_29_CTL 0x400008
+#define BGX_MSIX_PBA_0 0x4F0000
+
+/* MSI-X interrupts */
+#define BGX_MSIX_VECTORS 30
+#define BGX_LMAC_VEC_OFFSET 7
+#define BGX_MSIX_VEC_SHIFT 4
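+/* Each of the 4 LMACs gets BGX_LMAC_VEC_OFFSET (7) vectors (4 * 7 =
+ * 28); the two memory-error vectors below round this out to the 30
+ * MSI-X vectors.
+ */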
+
+#define CMRX_INT 0
+#define SPUX_INT 1
+#define SMUX_RX_INT 2
+#define SMUX_TX_INT 3
+#define GMPX_PCS_INT 4
+#define GMPX_GMI_RX_INT 5
+#define GMPX_GMI_TX_INT 6
+#define CMR_MEM_INT 28
+#define SPU_MEM_INT 29
+
+#define LMAC_INTR_LINK_UP BIT(0)
+#define LMAC_INTR_LINK_DOWN BIT(1)
+
+/* RX_DMAC_CTL configuration */
+enum MCAST_MODE {
+ MCAST_MODE_REJECT,
+ MCAST_MODE_ACCEPT,
+ MCAST_MODE_CAM_FILTER,
+ RSVD
+};
+
+#define BCAST_ACCEPT 1
+#define CAM_ACCEPT 1
+
+void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
+unsigned bgx_get_map(int node);
+int bgx_get_lmac_count(int node, int bgx);
+const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
+void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
+void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status);
+u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx);
+u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx);
+#define BGX_RX_STATS_COUNT 11
+#define BGX_TX_STATS_COUNT 18
+
+struct bgx_stats {
+ u64 rx_stats[BGX_RX_STATS_COUNT];
+ u64 tx_stats[BGX_TX_STATS_COUNT];
+};
+
+enum LMAC_TYPE {
+ BGX_MODE_SGMII = 0, /* 1 lane, 1.250 Gbaud */
+ BGX_MODE_XAUI = 1, /* 4 lanes, 3.125 Gbaud */
+ BGX_MODE_DXAUI = 1, /* 4 lanes, 6.250 Gbaud */
+ BGX_MODE_RXAUI = 2, /* 2 lanes, 6.250 Gbaud */
+ BGX_MODE_XFI = 3, /* 1 lane, 10.3125 Gbaud */
+ BGX_MODE_XLAUI = 4, /* 4 lanes, 10.3125 Gbaud */
+ BGX_MODE_10G_KR = 3,/* 1 lane, 10.3125 Gbaud */
+ BGX_MODE_40G_KR = 4,/* 4 lanes, 10.3125 Gbaud */
+};
+
+enum qlm_mode {
+ QLM_MODE_SGMII, /* SGMII, each lane independent */
+ QLM_MODE_XAUI_1X4, /* 1 XAUI or DXAUI, 4 lanes */
+ QLM_MODE_RXAUI_2X2, /* 2 RXAUI, 2 lanes each */
+ QLM_MODE_XFI_4X1, /* 4 XFI, 1 lane each */
+ QLM_MODE_XLAUI_1X4, /* 1 XLAUI, 4 lanes each */
+ QLM_MODE_10G_KR_4X1, /* 4 10GBASE-KR, 1 lane each */
+ QLM_MODE_40G_KR4_1X4, /* 1 40GBASE-KR4, 4 lanes each */
+};
+
+#endif /* THUNDER_BGX_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 3c109d1..bf2b822 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -198,23 +198,45 @@ struct lb_port_stats {
};
struct tp_tcp_stats {
- u32 tcpOutRsts;
- u64 tcpInSegs;
- u64 tcpOutSegs;
- u64 tcpRetransSegs;
+ u32 tcp_out_rsts;
+ u64 tcp_in_segs;
+ u64 tcp_out_segs;
+ u64 tcp_retrans_segs;
+};
+
+struct tp_usm_stats {
+ u32 frames;
+ u32 drops;
+ u64 octets;
+};
+
+struct tp_fcoe_stats {
+ u32 frames_ddp;
+ u32 frames_drop;
+ u64 octets_ddp;
};
struct tp_err_stats {
- u32 macInErrs[4];
- u32 hdrInErrs[4];
- u32 tcpInErrs[4];
- u32 tnlCongDrops[4];
- u32 ofldChanDrops[4];
- u32 tnlTxDrops[4];
- u32 ofldVlanDrops[4];
- u32 tcp6InErrs[4];
- u32 ofldNoNeigh;
- u32 ofldCongDefer;
+ u32 mac_in_errs[4];
+ u32 hdr_in_errs[4];
+ u32 tcp_in_errs[4];
+ u32 tnl_cong_drops[4];
+ u32 ofld_chan_drops[4];
+ u32 tnl_tx_drops[4];
+ u32 ofld_vlan_drops[4];
+ u32 tcp6_in_errs[4];
+ u32 ofld_no_neigh;
+ u32 ofld_cong_defer;
+};
+
+struct tp_cpl_stats {
+ u32 req[4];
+ u32 rsp[4];
+};
+
+struct tp_rdma_stats {
+ u32 rqe_dfr_pkt;
+ u32 rqe_dfr_mod;
};
struct sge_params {
@@ -224,7 +246,6 @@ struct sge_params {
};
struct tp_params {
- unsigned int ntxchan; /* # of Tx channels */
unsigned int tre; /* log2 of core clocks per TP tick */
unsigned int la_mask; /* what events are recorded by TP LA */
unsigned short tx_modq_map; /* TX modulation scheduler queue to */
@@ -273,6 +294,7 @@ struct pci_params {
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
enum chip_type {
T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
@@ -284,6 +306,10 @@ enum chip_type {
T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
T5_FIRST_REV = T5_A0,
T5_LAST_REV = T5_A1,
+
+ T6_A0 = CHELSIO_CHIP_CODE(CHELSIO_T6, 0),
+ T6_FIRST_REV = T6_A0,
+ T6_LAST_REV = T6_A0,
};
struct devlog_params {
@@ -292,6 +318,15 @@ struct devlog_params {
u32 size; /* size of log */
};
+/* Stores chip specific parameters */
+struct arch_specific_params {
+ u8 nchan;
+ u16 mps_rplc_size;
+ u16 vfcount;
+ u32 sge_fl_db;
+ u16 mps_tcam_size;
+};
+
struct adapter_params {
struct sge_params sge;
struct tp_params tp;
@@ -317,6 +352,7 @@ struct adapter_params {
unsigned char nports; /* # of ethernet ports */
unsigned char portvec;
enum chip_type chip; /* chip code */
+ struct arch_specific_params arch; /* chip specific params */
unsigned char offload;
unsigned char bypass;
@@ -432,6 +468,7 @@ struct port_info {
u8 rss_mode;
struct link_config link_cfg;
u16 *rss;
+ struct port_stats stats_base;
#ifdef CONFIG_CHELSIO_T4_DCB
struct port_dcb_info dcb; /* Data Center Bridging support */
#endif
@@ -650,6 +687,7 @@ struct sge {
struct sge_rspq **ingr_map; /* qid->queue ingress queue map */
unsigned long *starving_fl;
unsigned long *txq_maperr;
+ unsigned long *blocked_fl;
struct timer_list rx_timer; /* refills starving FLs */
struct timer_list tx_timer; /* checks Tx queues */
};
@@ -671,6 +709,12 @@ struct l2t_data;
#endif
+struct doorbell_stats {
+ u32 db_drop;
+ u32 db_empty;
+ u32 db_full;
+};
+
struct adapter {
void __iomem *regs;
void __iomem *bar2;
@@ -678,7 +722,7 @@ struct adapter {
struct pci_dev *pdev;
struct device *pdev_dev;
unsigned int mbox;
- unsigned int fn;
+ unsigned int pf;
unsigned int flags;
enum chip_type chip;
@@ -688,13 +732,12 @@ struct adapter {
struct cxgb4_virt_res vres;
unsigned int swintr;
- unsigned int wol;
-
struct {
unsigned short vec;
char desc[IFNAMSIZ + 10];
} msix_info[MAX_INGQ + 1];
+ struct doorbell_stats db_stats;
struct sge sge;
struct net_device *port[MAX_NPORTS];
@@ -849,6 +892,16 @@ enum {
VLAN_REWRITE
};
+static inline int is_offload(const struct adapter *adap)
+{
+ return adap->params.offload;
+}
+
+static inline int is_t6(enum chip_type chip)
+{
+ return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T6;
+}
+
static inline int is_t5(enum chip_type chip)
{
return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
@@ -1220,7 +1273,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
int t4_prep_adapter(struct adapter *adapter);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
-int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+int t4_bar2_sge_qregs(struct adapter *adapter,
unsigned int qid,
enum t4_bar2_qtype qtype,
u64 *pbar2_qoffset,
@@ -1267,13 +1320,23 @@ int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr);
void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres);
const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset);
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN]);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
unsigned int mask, unsigned int val);
void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr);
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st);
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st);
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st);
void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+ struct tp_fcoe_stats *st);
void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
const unsigned short *alpha, const unsigned short *beta);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index 371f75e..3719807 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -1084,41 +1084,89 @@ static inline void tcamxy2valmask(u64 x, u64 y, u8 *addr, u64 *mask)
static int mps_tcam_show(struct seq_file *seq, void *v)
{
- if (v == SEQ_START_TOKEN)
- seq_puts(seq, "Idx Ethernet address Mask Vld Ports PF"
- " VF Replication "
- "P0 P1 P2 P3 ML\n");
- else {
+ struct adapter *adap = seq->private;
+ unsigned int chip_ver = CHELSIO_CHIP_VERSION(adap->params.chip);
+
+ if (v == SEQ_START_TOKEN) {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_puts(seq, "Idx Ethernet address Mask "
+ "Vld Ports PF VF "
+ "Replication "
+ " P0 P1 P2 P3 ML\n");
+ else
+ seq_puts(seq, "Idx Ethernet address Mask "
+ "Vld Ports PF VF Replication"
+ " P0 P1 P2 P3 ML\n");
+ } else {
u64 mask;
u8 addr[ETH_ALEN];
- struct adapter *adap = seq->private;
+ bool replicate;
unsigned int idx = (uintptr_t)v - 2;
- u64 tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
- u64 tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
- u32 cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
- u32 cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
- u32 rplc[4] = {0, 0, 0, 0};
+ u64 tcamy, tcamx, val;
+ u32 cls_lo, cls_hi, ctl;
+ u32 rplc[8] = {0};
+
+ if (chip_ver > CHELSIO_T5) {
+ /* CtlCmdType - 0: Read, 1: Write
+ * CtlTcamSel - 0: TCAM0, 1: TCAM1
+				 * CtlXYBitSel - 0: Y bit, 1: X bit
+ */
+
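+				/* Entries at index 256 and above live in a
+				 * second TCAM bank and are read indirectly
+				 * through MPS_CLS_TCAM_DATA2_CTL.
+				 */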
+ /* Read tcamy */
+ ctl = CTLCMDTYPE_V(0) | CTLXYBITSEL_V(0);
+ if (idx < 256)
+ ctl |= CTLTCAMINDEX_V(idx) | CTLTCAMSEL_V(0);
+ else
+ ctl |= CTLTCAMINDEX_V(idx - 256) |
+ CTLTCAMSEL_V(1);
+ t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+ val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+ tcamy = DMACH_G(val) << 32;
+ tcamy |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+
+ /* Read tcamx. Change the control param */
+ ctl |= CTLXYBITSEL_V(1);
+ t4_write_reg(adap, MPS_CLS_TCAM_DATA2_CTL_A, ctl);
+ val = t4_read_reg(adap, MPS_CLS_TCAM_DATA1_A);
+ tcamx = DMACH_G(val) << 32;
+ tcamx |= t4_read_reg(adap, MPS_CLS_TCAM_DATA0_A);
+ } else {
+ tcamy = t4_read_reg64(adap, MPS_CLS_TCAM_Y_L(idx));
+ tcamx = t4_read_reg64(adap, MPS_CLS_TCAM_X_L(idx));
+ }
+
+ cls_lo = t4_read_reg(adap, MPS_CLS_SRAM_L(idx));
+ cls_hi = t4_read_reg(adap, MPS_CLS_SRAM_H(idx));
if (tcamx & tcamy) {
seq_printf(seq, "%3u -\n", idx);
goto out;
}
- if (cls_lo & REPLICATE_F) {
+ rplc[0] = rplc[1] = rplc[2] = rplc[3] = 0;
+ if (chip_ver > CHELSIO_T5)
+ replicate = (cls_lo & T6_REPLICATE_F);
+ else
+ replicate = (cls_lo & REPLICATE_F);
+
+ if (replicate) {
struct fw_ldst_cmd ldst_cmd;
int ret;
+ struct fw_ldst_mps_rplc mps_rplc;
+ u32 ldst_addrspc;
memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+ ldst_addrspc =
+ FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MPS);
ldst_cmd.op_to_addrspace =
htonl(FW_CMD_OP_V(FW_LDST_CMD) |
FW_CMD_REQUEST_F |
FW_CMD_READ_F |
- FW_LDST_CMD_ADDRSPACE_V(
- FW_LDST_ADDRSPC_MPS));
+ ldst_addrspc);
ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
- ldst_cmd.u.mps.fid_ctl =
+ ldst_cmd.u.mps.rplc.fid_idx =
htons(FW_LDST_CMD_FID_V(FW_LDST_MPS_RPLC) |
- FW_LDST_CMD_CTL_V(idx));
+ FW_LDST_CMD_IDX_V(idx));
ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd,
sizeof(ldst_cmd), &ldst_cmd);
if (ret)
@@ -1126,30 +1174,69 @@ static int mps_tcam_show(struct seq_file *seq, void *v)
"replication map for idx %d: %d\n",
idx, -ret);
else {
- rplc[0] = ntohl(ldst_cmd.u.mps.rplc31_0);
- rplc[1] = ntohl(ldst_cmd.u.mps.rplc63_32);
- rplc[2] = ntohl(ldst_cmd.u.mps.rplc95_64);
- rplc[3] = ntohl(ldst_cmd.u.mps.rplc127_96);
+ mps_rplc = ldst_cmd.u.mps.rplc;
+ rplc[0] = ntohl(mps_rplc.rplc31_0);
+ rplc[1] = ntohl(mps_rplc.rplc63_32);
+ rplc[2] = ntohl(mps_rplc.rplc95_64);
+ rplc[3] = ntohl(mps_rplc.rplc127_96);
+ if (adap->params.arch.mps_rplc_size > 128) {
+ rplc[4] = ntohl(mps_rplc.rplc159_128);
+ rplc[5] = ntohl(mps_rplc.rplc191_160);
+ rplc[6] = ntohl(mps_rplc.rplc223_192);
+ rplc[7] = ntohl(mps_rplc.rplc255_224);
+ }
}
}
tcamxy2valmask(tcamx, tcamy, addr, &mask);
- seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x %012llx"
- "%3c %#x%4u%4d",
- idx, addr[0], addr[1], addr[2], addr[3], addr[4],
- addr[5], (unsigned long long)mask,
- (cls_lo & SRAM_VLD_F) ? 'Y' : 'N', PORTMAP_G(cls_hi),
- PF_G(cls_lo),
- (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
- if (cls_lo & REPLICATE_F)
- seq_printf(seq, " %08x %08x %08x %08x",
- rplc[3], rplc[2], rplc[1], rplc[0]);
+ if (chip_ver > CHELSIO_T5)
+ seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012llx%3c %#x%4u%4d",
+ idx, addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5], (unsigned long long)mask,
+ (cls_lo & T6_SRAM_VLD_F) ? 'Y' : 'N',
+ PORTMAP_G(cls_hi),
+ T6_PF_G(cls_lo),
+ (cls_lo & T6_VF_VALID_F) ?
+ T6_VF_G(cls_lo) : -1);
else
- seq_printf(seq, "%36c", ' ');
- seq_printf(seq, "%4u%3u%3u%3u %#x\n",
- SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
- SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
- (cls_lo >> MULTILISTEN0_S) & 0xf);
+ seq_printf(seq, "%3u %02x:%02x:%02x:%02x:%02x:%02x "
+ "%012llx%3c %#x%4u%4d",
+ idx, addr[0], addr[1], addr[2], addr[3],
+ addr[4], addr[5], (unsigned long long)mask,
+ (cls_lo & SRAM_VLD_F) ? 'Y' : 'N',
+ PORTMAP_G(cls_hi),
+ PF_G(cls_lo),
+ (cls_lo & VF_VALID_F) ? VF_G(cls_lo) : -1);
+
+ if (replicate) {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_printf(seq, " %08x %08x %08x %08x "
+ "%08x %08x %08x %08x",
+ rplc[7], rplc[6], rplc[5], rplc[4],
+ rplc[3], rplc[2], rplc[1], rplc[0]);
+ else
+ seq_printf(seq, " %08x %08x %08x %08x",
+ rplc[3], rplc[2], rplc[1], rplc[0]);
+ } else {
+ if (adap->params.arch.mps_rplc_size > 128)
+ seq_printf(seq, "%72c", ' ');
+ else
+ seq_printf(seq, "%36c", ' ');
+ }
+
+ if (chip_ver > CHELSIO_T5)
+ seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+ T6_SRAM_PRIO0_G(cls_lo),
+ T6_SRAM_PRIO1_G(cls_lo),
+ T6_SRAM_PRIO2_G(cls_lo),
+ T6_SRAM_PRIO3_G(cls_lo),
+ (cls_lo >> T6_MULTILISTEN0_S) & 0xf);
+ else
+ seq_printf(seq, "%4u%3u%3u%3u %#x\n",
+ SRAM_PRIO0_G(cls_lo), SRAM_PRIO1_G(cls_lo),
+ SRAM_PRIO2_G(cls_lo), SRAM_PRIO3_G(cls_lo),
+ (cls_lo >> MULTILISTEN0_S) & 0xf);
}
out: return 0;
}
@@ -1222,7 +1309,7 @@ static int sensors_show(struct seq_file *seq, void *v)
param[1] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_DIAG) |
FW_PARAMS_PARAM_Y_V(FW_PARAM_DEV_DIAG_VDD));
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
param, val);
if (ret < 0 || val[0] == 0)
@@ -1416,6 +1503,9 @@ static int rss_config_show(struct seq_file *seq, void *v)
seq_printf(seq, " HashDelay: %3d\n", HASHDELAY_G(rssconf));
if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
seq_printf(seq, " VfWrAddr: %3d\n", VFWRADDR_G(rssconf));
+ else
+ seq_printf(seq, " VfWrAddr: %3d\n",
+ T6_VFWRADDR_G(rssconf));
seq_printf(seq, " KeyMode: %s\n", keymode[KEYMODE_G(rssconf)]);
seq_printf(seq, " VfWrEn: %3s\n", yesno(rssconf & VFWREN_F));
seq_printf(seq, " KeyWrEn: %3s\n", yesno(rssconf & KEYWREN_F));
@@ -1634,14 +1724,14 @@ static int rss_vf_config_open(struct inode *inode, struct file *file)
struct adapter *adapter = inode->i_private;
struct seq_tab *p;
struct rss_vf_conf *vfconf;
- int vf;
+ int vf, vfcount = adapter->params.arch.vfcount;
- p = seq_open_tab(file, 128, sizeof(*vfconf), 1, rss_vf_config_show);
+ p = seq_open_tab(file, vfcount, sizeof(*vfconf), 1, rss_vf_config_show);
if (!p)
return -ENOMEM;
vfconf = (struct rss_vf_conf *)p->data;
- for (vf = 0; vf < 128; vf++) {
+ for (vf = 0; vf < vfcount; vf++) {
t4_read_rss_vf_config(adapter, vf, &vfconf[vf].rss_vf_vfl,
&vfconf[vf].rss_vf_vfh);
}
@@ -1959,6 +2049,61 @@ static void add_debugfs_mem(struct adapter *adap, const char *name,
size_mb << 20);
}
+static int blocked_fl_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+static ssize_t blocked_fl_read(struct file *filp, char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int len;
+ const struct adapter *adap = filp->private_data;
+ char *buf;
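+	/* "%*pb" prints one hex digit per 4 bits with a ',' every
+	 * 32 bits, hence the sizing below.
+	 */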
+ ssize_t size = (adap->sge.egr_sz + 3) / 4 +
+ adap->sge.egr_sz / 32 + 2; /* includes ,/\n/\0 */
+
+ buf = kzalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len = snprintf(buf, size - 1, "%*pb\n",
+ adap->sge.egr_sz, adap->sge.blocked_fl);
+ len += sprintf(buf + len, "\n");
+ size = simple_read_from_buffer(ubuf, count, ppos, buf, len);
+ t4_free_mem(buf);
+ return size;
+}
+
+static ssize_t blocked_fl_write(struct file *filp, const char __user *ubuf,
+ size_t count, loff_t *ppos)
+{
+ int err;
+ unsigned long *t;
+ struct adapter *adap = filp->private_data;
+
+ t = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz), sizeof(long), GFP_KERNEL);
+ if (!t)
+ return -ENOMEM;
+
+ err = bitmap_parse_user(ubuf, count, t, adap->sge.egr_sz);
+	if (err) {
+		t4_free_mem(t);
+		return err;
+	}
+
+ bitmap_copy(adap->sge.blocked_fl, t, adap->sge.egr_sz);
+ t4_free_mem(t);
+ return count;
+}
+
+static const struct file_operations blocked_fl_fops = {
+ .owner = THIS_MODULE,
+ .open = blocked_fl_open,
+ .read = blocked_fl_read,
+ .write = blocked_fl_write,
+ .llseek = generic_file_llseek,
+};
+
/* Add an array of Debug FS files.
*/
void add_debugfs_files(struct adapter *adap,
@@ -1978,7 +2123,7 @@ void add_debugfs_files(struct adapter *adap,
int t4_setup_debugfs(struct adapter *adap)
{
int i;
- u32 size;
+ u32 size = 0;
struct dentry *de;
static struct t4_debugfs_entry t4_debugfs_files[] = {
@@ -2022,6 +2167,7 @@ int t4_setup_debugfs(struct adapter *adap)
#if IS_ENABLED(CONFIG_IPV6)
{ "clip_tbl", &clip_tbl_debugfs_fops, S_IRUSR, 0 },
#endif
+ { "blocked_fl", &blocked_fl_fops, S_IRUSR | S_IWUSR, 0 },
};
/* Debug FS nodes common to all T5 and later adapters.
@@ -2048,12 +2194,7 @@ int t4_setup_debugfs(struct adapter *adap)
size = t4_read_reg(adap, MA_EDRAM1_BAR_A);
add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM1_SIZE_G(size));
}
- if (is_t4(adap->params.chip)) {
- size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
- if (i & EXT_MEM_ENABLE_F)
- add_debugfs_mem(adap, "mc", MEM_MC,
- EXT_MEM_SIZE_G(size));
- } else {
+ if (is_t5(adap->params.chip)) {
if (i & EXT_MEM0_ENABLE_F) {
size = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
add_debugfs_mem(adap, "mc0", MEM_MC0,
@@ -2064,6 +2205,11 @@ int t4_setup_debugfs(struct adapter *adap)
add_debugfs_mem(adap, "mc1", MEM_MC1,
EXT_MEM1_SIZE_G(size));
}
+ } else {
+		if (i & EXT_MEM_ENABLE_F) {
+			size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
+			add_debugfs_mem(adap, "mc", MEM_MC,
+					EXT_MEM_SIZE_G(size));
+		}
}
de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
index 401272a..0194c91 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c
@@ -108,15 +108,82 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
"VLANinsertions ",
"GROpackets ",
"GROmerged ",
- "WriteCoalSuccess ",
- "WriteCoalFail ",
+};
+
+static char adapter_stats_strings[][ETH_GSTRING_LEN] = {
+ "db_drop ",
+ "db_full ",
+ "db_empty ",
+ "tcp_ipv4_out_rsts ",
+ "tcp_ipv4_in_segs ",
+ "tcp_ipv4_out_segs ",
+ "tcp_ipv4_retrans_segs ",
+ "tcp_ipv6_out_rsts ",
+ "tcp_ipv6_in_segs ",
+ "tcp_ipv6_out_segs ",
+ "tcp_ipv6_retrans_segs ",
+ "usm_ddp_frames ",
+ "usm_ddp_octets ",
+ "usm_ddp_drops ",
+ "rdma_no_rqe_mod_defer ",
+ "rdma_no_rqe_pkt_defer ",
+ "tp_err_ofld_no_neigh ",
+ "tp_err_ofld_cong_defer ",
+ "write_coal_success ",
+ "write_coal_fail ",
+};
+
+static char channel_stats_strings[][ETH_GSTRING_LEN] = {
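+	/* The first entry labels the channel number that get_stats()
+	 * emits ahead of these counters.
+	 */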
+ "--------Channel--------- ",
+ "tp_cpl_requests ",
+ "tp_cpl_responses ",
+ "tp_mac_in_errs ",
+ "tp_hdr_in_errs ",
+ "tp_tcp_in_errs ",
+ "tp_tcp6_in_errs ",
+ "tp_tnl_cong_drops ",
+ "tp_tnl_tx_drops ",
+ "tp_ofld_vlan_drops ",
+ "tp_ofld_chan_drops ",
+ "fcoe_octets_ddp ",
+ "fcoe_frames_ddp ",
+ "fcoe_frames_drop ",
+};
+
+static char loopback_stats_strings[][ETH_GSTRING_LEN] = {
+ "-------Loopback----------- ",
+ "octets_ok ",
+ "frames_ok ",
+ "bcast_frames ",
+ "mcast_frames ",
+ "ucast_frames ",
+ "error_frames ",
+ "frames_64 ",
+ "frames_65_to_127 ",
+ "frames_128_to_255 ",
+ "frames_256_to_511 ",
+ "frames_512_to_1023 ",
+ "frames_1024_to_1518 ",
+ "frames_1519_to_max ",
+ "frames_dropped ",
+ "bg0_frames_dropped ",
+ "bg1_frames_dropped ",
+ "bg2_frames_dropped ",
+ "bg3_frames_dropped ",
+ "bg0_frames_trunc ",
+ "bg1_frames_trunc ",
+ "bg2_frames_trunc ",
+ "bg3_frames_trunc ",
};
static int get_sset_count(struct net_device *dev, int sset)
{
switch (sset) {
case ETH_SS_STATS:
- return ARRAY_SIZE(stats_strings);
+ return ARRAY_SIZE(stats_strings) +
+ ARRAY_SIZE(adapter_stats_strings) +
+ ARRAY_SIZE(channel_stats_strings) +
+ ARRAY_SIZE(loopback_stats_strings);
default:
return -EOPNOTSUPP;
}
@@ -168,8 +235,18 @@ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
- if (stringset == ETH_SS_STATS)
+ if (stringset == ETH_SS_STATS) {
memcpy(data, stats_strings, sizeof(stats_strings));
+ data += sizeof(stats_strings);
+ memcpy(data, adapter_stats_strings,
+ sizeof(adapter_stats_strings));
+ data += sizeof(adapter_stats_strings);
+ memcpy(data, channel_stats_strings,
+ sizeof(channel_stats_strings));
+ data += sizeof(channel_stats_strings);
+ memcpy(data, loopback_stats_strings,
+ sizeof(loopback_stats_strings));
+ }
}
/* port stats maintained per queue of the port. They should be in the same
@@ -185,6 +262,45 @@ struct queue_port_stats {
u64 gro_merged;
};
+struct adapter_stats {
+ u64 db_drop;
+ u64 db_full;
+ u64 db_empty;
+ u64 tcp_v4_out_rsts;
+ u64 tcp_v4_in_segs;
+ u64 tcp_v4_out_segs;
+ u64 tcp_v4_retrans_segs;
+ u64 tcp_v6_out_rsts;
+ u64 tcp_v6_in_segs;
+ u64 tcp_v6_out_segs;
+ u64 tcp_v6_retrans_segs;
+ u64 frames;
+ u64 octets;
+ u64 drops;
+ u64 rqe_dfr_mod;
+ u64 rqe_dfr_pkt;
+ u64 ofld_no_neigh;
+ u64 ofld_cong_defer;
+ u64 wc_success;
+ u64 wc_fail;
+};
+
+struct channel_stats {
+ u64 cpl_req;
+ u64 cpl_rsp;
+ u64 mac_in_errs;
+ u64 hdr_in_errs;
+ u64 tcp_in_errs;
+ u64 tcp6_in_errs;
+ u64 tnl_cong_drops;
+ u64 tnl_tx_drops;
+ u64 ofld_vlan_drops;
+ u64 ofld_chan_drops;
+ u64 octets_ddp;
+ u64 frames_ddp;
+ u64 frames_drop;
+};
+
static void collect_sge_port_stats(const struct adapter *adap,
const struct port_info *p,
struct queue_port_stats *s)
@@ -205,30 +321,121 @@ static void collect_sge_port_stats(const struct adapter *adap,
}
}
+static void collect_adapter_stats(struct adapter *adap, struct adapter_stats *s)
+{
+ struct tp_tcp_stats v4, v6;
+ struct tp_rdma_stats rdma_stats;
+ struct tp_err_stats err_stats;
+ struct tp_usm_stats usm_stats;
+ u64 val1, val2;
+
+ memset(s, 0, sizeof(*s));
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_tcp_stats(adap, &v4, &v6);
+ t4_tp_get_rdma_stats(adap, &rdma_stats);
+ t4_get_usm_stats(adap, &usm_stats);
+ t4_tp_get_err_stats(adap, &err_stats);
+ spin_unlock(&adap->stats_lock);
+
+ s->db_drop = adap->db_stats.db_drop;
+ s->db_full = adap->db_stats.db_full;
+ s->db_empty = adap->db_stats.db_empty;
+
+ s->tcp_v4_out_rsts = v4.tcp_out_rsts;
+ s->tcp_v4_in_segs = v4.tcp_in_segs;
+ s->tcp_v4_out_segs = v4.tcp_out_segs;
+ s->tcp_v4_retrans_segs = v4.tcp_retrans_segs;
+ s->tcp_v6_out_rsts = v6.tcp_out_rsts;
+ s->tcp_v6_in_segs = v6.tcp_in_segs;
+ s->tcp_v6_out_segs = v6.tcp_out_segs;
+ s->tcp_v6_retrans_segs = v6.tcp_retrans_segs;
+
+ if (is_offload(adap)) {
+ s->frames = usm_stats.frames;
+ s->octets = usm_stats.octets;
+ s->drops = usm_stats.drops;
+ s->rqe_dfr_mod = rdma_stats.rqe_dfr_mod;
+ s->rqe_dfr_pkt = rdma_stats.rqe_dfr_pkt;
+ }
+
+ s->ofld_no_neigh = err_stats.ofld_no_neigh;
+ s->ofld_cong_defer = err_stats.ofld_cong_defer;
+
+ if (!is_t4(adap->params.chip)) {
+ int v;
+
+ v = t4_read_reg(adap, SGE_STAT_CFG_A);
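+		/* init_one() selects stat source 7 in SGE_STAT_CFG, under
+		 * which SGE_STAT_TOTAL/MATCH count write-coalescing; only
+		 * report the counters if that source is still selected.
+		 */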
+ if (STATSOURCE_T5_G(v) == 7) {
+ val2 = t4_read_reg(adap, SGE_STAT_MATCH_A);
+ val1 = t4_read_reg(adap, SGE_STAT_TOTAL_A);
+ s->wc_success = val1 - val2;
+ s->wc_fail = val2;
+ }
+ }
+}
+
+static void collect_channel_stats(struct adapter *adap, struct channel_stats *s,
+ u8 i)
+{
+ struct tp_cpl_stats cpl_stats;
+ struct tp_err_stats err_stats;
+ struct tp_fcoe_stats fcoe_stats;
+
+ memset(s, 0, sizeof(*s));
+
+ spin_lock(&adap->stats_lock);
+ t4_tp_get_cpl_stats(adap, &cpl_stats);
+ t4_tp_get_err_stats(adap, &err_stats);
+ t4_get_fcoe_stats(adap, i, &fcoe_stats);
+ spin_unlock(&adap->stats_lock);
+
+ s->cpl_req = cpl_stats.req[i];
+ s->cpl_rsp = cpl_stats.rsp[i];
+ s->mac_in_errs = err_stats.mac_in_errs[i];
+ s->hdr_in_errs = err_stats.hdr_in_errs[i];
+ s->tcp_in_errs = err_stats.tcp_in_errs[i];
+ s->tcp6_in_errs = err_stats.tcp6_in_errs[i];
+ s->tnl_cong_drops = err_stats.tnl_cong_drops[i];
+ s->tnl_tx_drops = err_stats.tnl_tx_drops[i];
+ s->ofld_vlan_drops = err_stats.ofld_vlan_drops[i];
+ s->ofld_chan_drops = err_stats.ofld_chan_drops[i];
+ s->octets_ddp = fcoe_stats.octets_ddp;
+ s->frames_ddp = fcoe_stats.frames_ddp;
+ s->frames_drop = fcoe_stats.frames_drop;
+}
+
static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
u64 *data)
{
struct port_info *pi = netdev_priv(dev);
struct adapter *adapter = pi->adapter;
- u32 val1, val2;
+ struct lb_port_stats s;
+ int i;
+ u64 *p0;
- t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
+ t4_get_port_stats_offset(adapter, pi->tx_chan,
+ (struct port_stats *)data,
+ &pi->stats_base);
data += sizeof(struct port_stats) / sizeof(u64);
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
data += sizeof(struct queue_port_stats) / sizeof(u64);
- if (!is_t4(adapter->params.chip)) {
- t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7));
- val1 = t4_read_reg(adapter, SGE_STAT_TOTAL_A);
- val2 = t4_read_reg(adapter, SGE_STAT_MATCH_A);
- *data = val1 - val2;
- data++;
- *data = val2;
- data++;
- } else {
- memset(data, 0, 2 * sizeof(u64));
- *data += 2;
- }
+ collect_adapter_stats(adapter, (struct adapter_stats *)data);
+ data += sizeof(struct adapter_stats) / sizeof(u64);
+
+ *data++ = (u64)pi->port_id;
+ collect_channel_stats(adapter, (struct channel_stats *)data,
+ pi->port_id);
+ data += sizeof(struct channel_stats) / sizeof(u64);
+
+ *data++ = (u64)pi->port_id;
+ memset(&s, 0, sizeof(s));
+ t4_get_lb_stats(adapter, pi->port_id, &s);
+
+ p0 = &s.octets;
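+	/* Entry 0 of loopback_stats_strings is a header; the port id
+	 * written above stands in for it.
+	 */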
+ for (i = 0; i < ARRAY_SIZE(loopback_stats_strings) - 1; i++)
+ *data++ = (unsigned long long)*p0++;
}
static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
@@ -250,7 +457,7 @@ static int restart_autoneg(struct net_device *dev)
return -EAGAIN;
if (p->link_cfg.autoneg != AUTONEG_ENABLE)
return -EINVAL;
- t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
+ t4_restart_aneg(p->adapter, p->adapter->pf, p->tx_chan);
return 0;
}
@@ -267,7 +474,7 @@ static int identify_port(struct net_device *dev,
else
return -EINVAL;
- return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
+ return t4_identify_port(adap, adap->pf, netdev2pinfo(dev)->viid, val);
}
static unsigned int from_fw_linkcaps(enum fw_port_type type, unsigned int caps)
@@ -439,7 +646,7 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
lc->autoneg = cmd->autoneg;
if (netif_running(dev))
- return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ return t4_link_start(p->adapter, p->adapter->pf, p->tx_chan,
lc);
return 0;
}
@@ -472,7 +679,7 @@ static int set_pauseparam(struct net_device *dev,
if (epause->tx_pause)
lc->requested_fc |= PAUSE_TX;
if (netif_running(dev))
- return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
+ return t4_link_start(p->adapter, p->adapter->pf, p->tx_chan,
lc);
return 0;
}
@@ -617,7 +824,7 @@ static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
*/
static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
{
- int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
@@ -626,7 +833,7 @@ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
{
- int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
+ int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE);
if (vaddr >= 0)
vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
@@ -669,8 +876,8 @@ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
aligned_offset = eeprom->offset & ~3;
aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
- if (adapter->fn > 0) {
- u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
+ if (adapter->pf > 0) {
+ u32 start = 1024 + adapter->pf * EEPROMPFSIZE;
if (aligned_offset < start ||
aligned_offset + aligned_len > start + EEPROMPFSIZE)
@@ -740,37 +947,6 @@ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
return ret;
}
-#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
-#define BCAST_CRC 0xa0ccc1a6
-
-static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- wol->supported = WAKE_BCAST | WAKE_MAGIC;
- wol->wolopts = netdev2adap(dev)->wol;
- memset(&wol->sopass, 0, sizeof(wol->sopass));
-}
-
-static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
-{
- int err = 0;
- struct port_info *pi = netdev_priv(dev);
-
- if (wol->wolopts & ~WOL_SUPPORTED)
- return -EINVAL;
- t4_wol_magic_enable(pi->adapter, pi->tx_chan,
- (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
- if (wol->wolopts & WAKE_BCAST) {
- err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
- ~0ULL, 0, false);
- if (!err)
- err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
- ~6ULL, ~0ULL, BCAST_CRC, true);
- } else {
- t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
- }
- return err;
-}
-
static u32 get_rss_table_size(struct net_device *dev)
{
const struct port_info *pi = netdev_priv(dev);
@@ -900,8 +1076,6 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.get_ethtool_stats = get_stats,
.get_regs_len = get_regs_len,
.get_regs = get_regs,
- .get_wol = get_wol,
- .set_wol = set_wol,
.get_rxnfc = get_rxnfc,
.get_rxfh_indir_size = get_rss_table_size,
.get_rxfh = get_rss_table,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 73ac153..3057154 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -135,8 +135,10 @@ struct filter_entry {
#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
+#define FW6_FNAME "cxgb4/t6fw.bin"
#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
+#define FW6_CFNAME "cxgb4/t6-config.txt"
#define PHY_AQ1202_FIRMWARE "cxgb4/aq1202_fw.cld"
#define PHY_BCM84834_FIRMWARE "cxgb4/bcm8483.bin"
#define PHY_AQ1202_DEVICEID 0x4409
@@ -322,7 +324,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
* level") we need to issue the Set Parameters Commannd
* without sleeping (timeout < 0).
*/
- err = t4_set_params_timeout(adap, adap->mbox, adap->fn, 0, 1,
+ err = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
&name, &value,
-FW_CMD_MAX_TIMEOUT);
@@ -387,7 +389,7 @@ static int set_addr_filters(const struct net_device *dev, bool sleep)
int uc_cnt = netdev_uc_count(dev);
int mc_cnt = netdev_mc_count(dev);
const struct port_info *pi = netdev_priv(dev);
- unsigned int mb = pi->adapter->fn;
+ unsigned int mb = pi->adapter->pf;
/* first do the secondary unicast addresses */
netdev_for_each_uc_addr(ha, dev) {
@@ -444,7 +446,7 @@ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
ret = set_addr_filters(dev, sleep_ok);
if (ret == 0)
- ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, mtu,
(dev->flags & IFF_PROMISC) ? 1 : 0,
(dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
sleep_ok);
@@ -461,7 +463,7 @@ static int link_start(struct net_device *dev)
{
int ret;
struct port_info *pi = netdev_priv(dev);
- unsigned int mb = pi->adapter->fn;
+ unsigned int mb = pi->adapter->pf;
/*
* We do not set address filters and promiscuity here, the stack does
@@ -879,7 +881,7 @@ int cxgb4_write_rss(const struct port_info *pi, const u16 *queues)
for (i = 0; i < pi->rss_size; i++, queues++)
rss[i] = rxq[*queues].rspq.abs_id;
- err = t4_config_rss_range(adapter, adapter->fn, pi->viid, 0,
+ err = t4_config_rss_range(adapter, adapter->pf, pi->viid, 0,
pi->rss_size, rss, pi->rss_size);
/* If Tunnel All Lookup isn't specified in the global RSS
* Configuration, then we need to specify a default Ingress
@@ -1351,11 +1353,6 @@ static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
return fallback(dev, skb) % dev->real_num_tx_queues;
}
-static inline int is_offload(const struct adapter *adap)
-{
- return adap->params.offload;
-}
-
static int closest_timer(const struct sge *s, int time)
{
int i, delta, match = 0, min_delta = INT_MAX;
@@ -1416,8 +1413,8 @@ int cxgb4_set_rspq_intr_params(struct sge_rspq *q,
FW_PARAMS_PARAM_X_V(
FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
FW_PARAMS_PARAM_YZ_V(q->cntxt_id);
- err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
- &new_idx);
+ err = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
+ &v, &new_idx);
if (err)
return err;
}
@@ -1438,7 +1435,7 @@ static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
if (!(changed & NETIF_F_HW_VLAN_CTAG_RX))
return 0;
- err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
+ err = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, -1,
-1, -1, -1,
!!(features & NETIF_F_HW_VLAN_CTAG_RX), true);
if (unlikely(err))
@@ -1721,7 +1718,7 @@ static int tid_init(struct tid_info *t)
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
/* Reserve stid 0 for T4/T5 adapters */
if (!t->stid_base &&
- (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
+ (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5))
__set_bit(0, t->stid_bmap);
return 0;
@@ -2069,25 +2066,6 @@ out:
}
EXPORT_SYMBOL(cxgb4_sync_txq_pidx);
-void cxgb4_disable_db_coalescing(struct net_device *dev)
-{
- struct adapter *adap;
-
- adap = netdev2adap(dev);
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F,
- NOCOALESCE_F);
-}
-EXPORT_SYMBOL(cxgb4_disable_db_coalescing);
-
-void cxgb4_enable_db_coalescing(struct net_device *dev)
-{
- struct adapter *adap;
-
- adap = netdev2adap(dev);
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, NOCOALESCE_F, 0);
-}
-EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
-
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
{
struct adapter *adap;
@@ -2127,10 +2105,7 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
if (offset < mc0_end) {
memtype = MEM_MC0;
memaddr = offset - edc1_end;
- } else if (is_t4(adap->params.chip)) {
- /* T4 only has a single memory channel */
- goto err;
- } else {
+ } else if (is_t5(adap->params.chip)) {
size = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
mc1_size = EXT_MEM1_SIZE_G(size) << 20;
mc1_end = mc0_end + mc1_size;
@@ -2141,6 +2116,9 @@ int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
/* offset beyond the end of any memory */
goto err;
}
+ } else {
+		/* T4/T6 only have a single memory channel */
+ goto err;
}
}
@@ -2175,7 +2153,7 @@ int cxgb4_bar2_sge_qregs(struct net_device *dev,
u64 *pbar2_qoffset,
unsigned int *pbar2_qid)
{
- return cxgb4_t4_bar2_sge_qregs(netdev2adap(dev),
+ return t4_bar2_sge_qregs(netdev2adap(dev),
qid,
(qtype == CXGB4_BAR2_QTYPE_EGRESS
? T4_BAR2_QTYPE_EGRESS
@@ -2305,9 +2283,13 @@ static void process_db_full(struct work_struct *work)
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
- t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
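+	/* T6 re-arms only the low-priority doorbell FIFO interrupt. */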
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F,
+ DBFIFO_HP_INT_F | DBFIFO_LP_INT_F);
+ else
+ t4_set_reg_field(adap, SGE_INT_ENABLE3_A,
+ DBFIFO_LP_INT_F, DBFIFO_LP_INT_F);
}
static void sync_txq_pidx(struct adapter *adap, struct sge_txq *q)
@@ -2369,7 +2351,7 @@ static void process_db_drop(struct work_struct *work)
drain_db_fifo(adap, dbfifo_drain_delay);
enable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_EMPTY);
- } else {
+ } else if (is_t5(adap->params.chip)) {
u32 dropped_db = t4_read_reg(adap, 0x010ac);
u16 qid = (dropped_db >> 15) & 0x1ffff;
u16 pidx_inc = dropped_db & 0x1fff;
@@ -2377,7 +2359,7 @@ static void process_db_drop(struct work_struct *work)
unsigned int bar2_qid;
int ret;
- ret = cxgb4_t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
+ ret = t4_bar2_sge_qregs(adap, qid, T4_BAR2_QTYPE_EGRESS,
&bar2_qoffset, &bar2_qid);
if (ret)
dev_err(adap->pdev_dev, "doorbell drop recovery: "
@@ -2390,7 +2372,8 @@ static void process_db_drop(struct work_struct *work)
t4_set_reg_field(adap, 0x10b0, 1<<15, 1<<15);
}
- t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ t4_set_reg_field(adap, SGE_DOORBELL_CONTROL_A, DROPPED_DB_F, 0);
}
void t4_db_full(struct adapter *adap)
@@ -2420,7 +2403,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
unsigned short i;
lli.pdev = adap->pdev;
- lli.pf = adap->fn;
+ lli.pf = adap->pf;
lli.l2t = adap->l2t;
lli.tids = &adap->tids;
lli.ports = adap->port;
@@ -2757,7 +2740,7 @@ static int cxgb_close(struct net_device *dev)
netif_tx_stop_all_queues(dev);
netif_carrier_off(dev);
- return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
+ return t4_enable_vi(adapter, adapter->pf, pi->viid, false, false);
}
/* Return an error number if the indicated filter isn't writable ...
@@ -2901,7 +2884,8 @@ static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
spin_unlock(&adapter->stats_lock);
return ns;
}
- t4_get_port_stats(adapter, p->tx_chan, &stats);
+ t4_get_port_stats_offset(adapter, p->tx_chan, &stats,
+ &p->stats_base);
spin_unlock(&adapter->stats_lock);
ns->tx_bytes = stats.tx_octets;
@@ -2960,7 +2944,7 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
} else
return -EINVAL;
- mbox = pi->adapter->fn;
+ mbox = pi->adapter->pf;
if (cmd == SIOCGMIIREG)
ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
data->reg_num, &data->val_out);
@@ -2987,7 +2971,7 @@ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
if (new_mtu < 81 || new_mtu > MAX_MTU) /* accommodate SACK */
return -EINVAL;
- ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
+ ret = t4_set_rxmode(pi->adapter, pi->adapter->pf, pi->viid, new_mtu, -1,
-1, -1, -1, true);
if (!ret)
dev->mtu = new_mtu;
@@ -3003,7 +2987,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
- ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
+ ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid,
pi->xact_addr_filt, addr->sa_data, true, true);
if (ret < 0)
return ret;
@@ -3100,7 +3084,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_READ_F);
c->cfvalid_to_len16 = htonl(FW_LEN16(*c));
- ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
+ ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), c);
if (ret < 0)
return ret;
@@ -3116,18 +3100,18 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
}
c->op_to_write = htonl(FW_CMD_OP_V(FW_CAPS_CONFIG_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
- ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
+ ret = t4_wr_mbox(adap, adap->mbox, c, sizeof(*c), NULL);
if (ret < 0)
return ret;
- ret = t4_config_glbl_rss(adap, adap->fn,
+ ret = t4_config_glbl_rss(adap, adap->pf,
FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
FW_RSS_GLB_CONFIG_CMD_TNLMAPEN_F |
FW_RSS_GLB_CONFIG_CMD_TNLALLLKP_F);
if (ret < 0)
return ret;
- ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, adap->sge.egr_sz, 64,
+ ret = t4_cfg_pfvf(adap, adap->mbox, adap->pf, 0, adap->sge.egr_sz, 64,
MAX_INGQ, 0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF,
FW_CMD_CAP_PF);
if (ret < 0)
@@ -3171,7 +3155,7 @@ static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
}
/* get basic stuff going */
- return t4_early_init(adap, adap->fn);
+ return t4_early_init(adap, adap->pf);
}
/*
@@ -3409,6 +3393,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
case CHELSIO_T5:
fw_config_file = FW5_CFNAME;
break;
+ case CHELSIO_T6:
+ fw_config_file = FW6_CFNAME;
+ break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
adapter->pdev->device);
@@ -3434,7 +3421,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
ret = t4_query_params(adapter, adapter->mbox,
- adapter->fn, 0, 1, params, val);
+ adapter->pf, 0, 1, params, val);
if (ret == 0) {
/*
* For t4_memory_rw() below addresses and
@@ -3605,7 +3592,24 @@ static struct fw_info fw_info_array[] = {
.intfver_iscsi = FW_INTFVER(T5, ISCSI),
.intfver_fcoe = FW_INTFVER(T5, FCOE),
},
+ }, {
+ .chip = CHELSIO_T6,
+ .fs_name = FW6_CFNAME,
+ .fw_mod_name = FW6_FNAME,
+ .fw_hdr = {
+ .chip = FW_HDR_CHIP_T6,
+ .fw_ver = __cpu_to_be32(FW_VERSION(T6)),
+ .intfver_nic = FW_INTFVER(T6, NIC),
+ .intfver_vnic = FW_INTFVER(T6, VNIC),
+ .intfver_ofld = FW_INTFVER(T6, OFLD),
+ .intfver_ri = FW_INTFVER(T6, RI),
+ .intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU),
+ .intfver_iscsi = FW_INTFVER(T6, ISCSI),
+ .intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU),
+ .intfver_fcoe = FW_INTFVER(T6, FCOE),
+ },
}
+
};
static struct fw_info *find_fw_info(int chip)
@@ -3723,7 +3727,7 @@ static int adap_init0(struct adapter *adap)
v =
FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1, &v, &port_vec);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, &v, &port_vec);
if (ret < 0)
goto bye;
@@ -3746,7 +3750,7 @@ static int adap_init0(struct adapter *adap)
*/
params[0] = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CF));
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
params, val);
/* If the firmware doesn't support Configuration Files,
@@ -3805,7 +3809,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(FILTER_START);
params[4] = FW_PARAM_PFVF(FILTER_END);
params[5] = FW_PARAM_PFVF(IQFLINT_START);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params, val);
if (ret < 0)
goto bye;
adap->sge.egr_start = val[0];
@@ -3823,7 +3827,7 @@ static int adap_init0(struct adapter *adap)
*/
params[0] = FW_PARAM_PFVF(EQ_END);
params[1] = FW_PARAM_PFVF(IQFLINT_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
if (ret < 0)
goto bye;
adap->sge.egr_sz = val[0] - adap->sge.egr_start + 1;
@@ -3844,7 +3848,7 @@ static int adap_init0(struct adapter *adap)
}
	/* Allocate the memory for the various egress queue bitmaps
-	 * ie starving_fl and txq_maperr.
+	 * i.e. starving_fl, txq_maperr and blocked_fl.
*/
adap->sge.starving_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
sizeof(long), GFP_KERNEL);
@@ -3860,9 +3864,18 @@ static int adap_init0(struct adapter *adap)
goto bye;
}
+#ifdef CONFIG_DEBUG_FS
+ adap->sge.blocked_fl = kcalloc(BITS_TO_LONGS(adap->sge.egr_sz),
+ sizeof(long), GFP_KERNEL);
+ if (!adap->sge.blocked_fl) {
+ ret = -ENOMEM;
+ goto bye;
+ }
+#endif
+
params[0] = FW_PARAM_PFVF(CLIP_START);
params[1] = FW_PARAM_PFVF(CLIP_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
if (ret < 0)
goto bye;
adap->clipt_start = val[0];
@@ -3871,7 +3884,7 @@ static int adap_init0(struct adapter *adap)
/* query params related to active filter region */
params[0] = FW_PARAM_PFVF(ACTIVE_FILTER_START);
params[1] = FW_PARAM_PFVF(ACTIVE_FILTER_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params, val);
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params, val);
/* If Active filter size is set we enable establishing
* offload connection through firmware work request
*/
@@ -3888,7 +3901,7 @@ static int adap_init0(struct adapter *adap)
*/
params[0] = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
val[0] = 1;
- (void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
+ (void)t4_set_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
/*
* Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
@@ -3900,7 +3913,7 @@ static int adap_init0(struct adapter *adap)
adap->params.ulptx_memwrite_dsgl = false;
} else {
params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0,
1, params, val);
adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
}
@@ -3926,7 +3939,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(TDDP_START);
params[4] = FW_PARAM_PFVF(TDDP_END);
params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
params, val);
if (ret < 0)
goto bye;
@@ -3964,7 +3977,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(RQ_END);
params[4] = FW_PARAM_PFVF(PBL_START);
params[5] = FW_PARAM_PFVF(PBL_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6,
params, val);
if (ret < 0)
goto bye;
@@ -3981,7 +3994,7 @@ static int adap_init0(struct adapter *adap)
params[3] = FW_PARAM_PFVF(CQ_END);
params[4] = FW_PARAM_PFVF(OCQ_START);
params[5] = FW_PARAM_PFVF(OCQ_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 6, params,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 6, params,
val);
if (ret < 0)
goto bye;
@@ -3994,7 +4007,7 @@ static int adap_init0(struct adapter *adap)
params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2, params,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2, params,
val);
if (ret < 0) {
adap->params.max_ordird_qp = 8;
@@ -4012,7 +4025,7 @@ static int adap_init0(struct adapter *adap)
if (caps_cmd.iscsicaps) {
params[0] = FW_PARAM_PFVF(ISCSI_START);
params[1] = FW_PARAM_PFVF(ISCSI_END);
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 2,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 2,
params, val);
if (ret < 0)
goto bye;
@@ -4072,6 +4085,9 @@ bye:
kfree(adap->sge.ingr_map);
kfree(adap->sge.starving_fl);
kfree(adap->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+ kfree(adap->sge.blocked_fl);
+#endif
if (ret != -ETIMEDOUT && ret != -EIO)
t4_fw_bye(adap, adap->mbox);
return ret;
@@ -4139,7 +4155,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
if (t4_wait_dev_ready(adap->regs) < 0)
return PCI_ERS_RESULT_DISCONNECT;
- if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL) < 0)
+ if (t4_fw_hello(adap, adap->mbox, adap->pf, MASTER_MUST, NULL) < 0)
return PCI_ERS_RESULT_DISCONNECT;
adap->flags |= FW_OK;
if (adap_init1(adap, &c))
@@ -4148,7 +4164,7 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
for_each_port(adap, i) {
struct port_info *p = adap2pinfo(adap, i);
- ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
+ ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1,
NULL, NULL);
if (ret < 0)
return PCI_ERS_RESULT_DISCONNECT;
@@ -4515,6 +4531,9 @@ static void free_some_resources(struct adapter *adapter)
kfree(adapter->sge.ingr_map);
kfree(adapter->sge.starving_fl);
kfree(adapter->sge.txq_maperr);
+#ifdef CONFIG_DEBUG_FS
+ kfree(adapter->sge.blocked_fl);
+#endif
disable_msi(adapter);
for_each_port(adapter, i)
@@ -4523,7 +4542,7 @@ static void free_some_resources(struct adapter *adapter)
free_netdev(adapter->port[i]);
}
if (adapter->flags & FW_OK)
- t4_fw_bye(adapter, adapter->fn);
+ t4_fw_bye(adapter, adapter->pf);
}
#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
@@ -4614,7 +4633,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
adapter->pdev = pdev;
adapter->pdev_dev = &pdev->dev;
adapter->mbox = func;
- adapter->fn = func;
+ adapter->pf = func;
adapter->msg_enable = dflt_msg_enable;
memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
@@ -4634,7 +4653,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!is_t4(adapter->params.chip)) {
s_qpp = (QUEUESPERPAGEPF0_S +
(QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) *
- adapter->fn);
+ adapter->pf);
qpp = 1 << QUEUESPERPAGEPF0_G(t4_read_reg(adapter,
SGE_EGRESS_QUEUES_PER_PAGE_PF_A) >> s_qpp);
num_seg = PAGE_SIZE / SEGMENT_SIZE;
@@ -4657,10 +4676,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
err = -ENOMEM;
goto out_free_adapter;
}
+ t4_write_reg(adapter, SGE_STAT_CFG_A,
+ STATSOURCE_T5_V(7) | STATMODE_V(0));
}
setup_memwin(adapter);
err = adap_init0(adapter);
+#ifdef CONFIG_DEBUG_FS
+ bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz);
+#endif
setup_memwin_rdma(adapter);
if (err)
goto out_unmap_bar;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index df34293..14e8110 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -298,8 +298,6 @@ struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
unsigned int skb_len, unsigned int pull_len);
int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
int cxgb4_flush_eq_cache(struct net_device *dev);
-void cxgb4_disable_db_coalescing(struct net_device *dev);
-void cxgb4_enable_db_coalescing(struct net_device *dev);
int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
u64 cxgb4_read_sge_timestamp(struct net_device *dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index dd18fcb..6b7c37fd0 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -522,14 +522,13 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
{
- u32 val;
if (q->pend_cred >= 8) {
+ u32 val = adap->params.arch.sge_fl_db;
+
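+		/* Free-list doorbell credits are posted in units of 8;
+		 * arch.sge_fl_db carries the chip-specific DBPRIO/DBTYPE
+		 * doorbell flags filled in with the other arch parameters.
+		 */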
if (is_t4(adap->params.chip))
- val = PIDX_V(q->pend_cred / 8);
+ val |= PIDX_V(q->pend_cred / 8);
else
- val = PIDX_T5_V(q->pend_cred / 8) |
- DBTYPE_F;
- val |= DBPRIO_F;
+ val |= PIDX_T5_V(q->pend_cred / 8);
/* Make sure all memory writes to the Free List queue are
* committed before we tell the hardware about them.
@@ -588,6 +587,11 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
struct rx_sw_desc *sd = &q->sdesc[q->pidx];
int node;
+#ifdef CONFIG_DEBUG_FS
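+	/* Skip free lists blocked via the "blocked_fl" debugfs bitmap. */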
+ if (test_bit(q->cntxt_id - adap->sge.egr_start, adap->sge.blocked_fl))
+ goto out;
+#endif
+
gfp |= __GFP_NOWARN;
node = dev_to_node(adap->pdev_dev);
@@ -1029,7 +1033,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
* Figure out what HW csum a packet wants and return the appropriate control
* bits.
*/
-static u64 hwcsum(const struct sk_buff *skb)
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
int csum_type;
const struct iphdr *iph = ip_hdr(skb);
@@ -1060,11 +1064,16 @@ nocsum: /*
goto nocsum;
}
- if (likely(csum_type >= TX_CSUM_TCPIP))
- return TXPKT_CSUM_TYPE_V(csum_type) |
- TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) |
- TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN);
- else {
+ if (likely(csum_type >= TX_CSUM_TCPIP)) {
+ u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+ int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
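+		/* T6 redefines the Ethernet-header-length field layout in
+		 * the TX packet CPL, so it takes its own shift/mask macro.
+		 */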
+ if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5)
+ hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ else
+ hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+ } else {
int start = skb_transport_offset(skb);
return TXPKT_CSUM_TYPE_V(csum_type) |
@@ -1232,9 +1241,15 @@ out_free: dev_kfree_skb_any(skb);
else
lso->c.len = htonl(LSO_T5_XFER_SIZE_V(skb->len));
cpl = (void *)(lso + 1);
- cntrl = TXPKT_CSUM_TYPE_V(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN_V(l3hdr_len) |
- TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
+ TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
@@ -1243,7 +1258,8 @@ out_free: dev_kfree_skb_any(skb);
FW_WR_IMMDLEN_V(len));
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F;
+ cntrl = hwcsum(adap->params.chip, skb) |
+ TXPKT_IPCSUM_DIS_F;
q->tx_cso++;
}
}
@@ -1260,7 +1276,7 @@ out_free: dev_kfree_skb_any(skb);
cpl->ctrl0 = htonl(TXPKT_OPCODE_V(CPL_TX_PKT_XT) |
TXPKT_INTF_V(pi->tx_chan) |
- TXPKT_PF_V(adap->fn));
+ TXPKT_PF_V(adap->pf));
cpl->pack = htons(0);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
@@ -2385,7 +2401,7 @@ static void __iomem *bar2_address(struct adapter *adapter,
u64 bar2_qoffset;
int ret;
- ret = cxgb4_t4_bar2_sge_qregs(adapter, qid, qtype,
+ ret = t4_bar2_sge_qregs(adapter, qid, qtype,
&bar2_qoffset, pbar2_qid);
if (ret)
return NULL;
@@ -2416,7 +2432,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_IQ_CMD_PFN_V(adap->fn) | FW_IQ_CMD_VFN_V(0));
+ FW_IQ_CMD_PFN_V(adap->pf) | FW_IQ_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC_F | FW_IQ_CMD_IQSTART_F |
FW_LEN16(c));
c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE_V(FW_IQ_TYPE_FL_INT_CAP) |
@@ -2435,6 +2451,8 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
c.iqns_to_fl0congen = htonl(FW_IQ_CMD_IQFLINTCONGEN_F);
if (fl) {
+ enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
+
/* Allocate the ring for the hardware free list (with space
* for its status page) along with the associated software
* descriptor ring. The free list size needs to be a multiple
@@ -2463,12 +2481,14 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
FW_IQ_CMD_FL0CONGEN_F);
c.fl0dcaen_to_fl0cidxfthresh =
htons(FW_IQ_CMD_FL0FBMIN_V(FETCHBURSTMIN_64B_X) |
- FW_IQ_CMD_FL0FBMAX_V(FETCHBURSTMAX_512B_X));
+ FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+ FETCHBURSTMAX_512B_X :
+ FETCHBURSTMAX_256B_X));
c.fl0size = htons(flsz);
c.fl0addr = cpu_to_be64(fl->addr);
}
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret)
goto err;
@@ -2536,7 +2556,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
CONMCTXT_CNGCHMAP_V(1 << (i << 2));
}
}
- ret = t4_set_params(adap, adap->mbox, adap->fn, 0, 1,
+ ret = t4_set_params(adap, adap->mbox, adap->pf, 0, 1,
&param, &val);
if (ret)
dev_warn(adap->pdev_dev, "Failed to set Congestion"
@@ -2601,7 +2621,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_ETH_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_EQ_ETH_CMD_PFN_V(adap->fn) |
+ FW_EQ_ETH_CMD_PFN_V(adap->pf) |
FW_EQ_ETH_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC_F |
FW_EQ_ETH_CMD_EQSTART_F | FW_LEN16(c));
@@ -2618,7 +2638,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
FW_EQ_ETH_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
kfree(txq->q.sdesc);
txq->q.sdesc = NULL;
@@ -2656,7 +2676,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_EQ_CTRL_CMD_PFN_V(adap->fn) |
+ FW_EQ_CTRL_CMD_PFN_V(adap->pf) |
FW_EQ_CTRL_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC_F |
FW_EQ_CTRL_CMD_EQSTART_F | FW_LEN16(c));
@@ -2673,7 +2693,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
FW_EQ_CTRL_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
dma_free_coherent(adap->pdev_dev,
nentries * sizeof(struct tx_desc),
@@ -2711,7 +2731,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
memset(&c, 0, sizeof(c));
c.op_to_vfn = htonl(FW_CMD_OP_V(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST_F |
FW_CMD_WRITE_F | FW_CMD_EXEC_F |
- FW_EQ_OFLD_CMD_PFN_V(adap->fn) |
+ FW_EQ_OFLD_CMD_PFN_V(adap->pf) |
FW_EQ_OFLD_CMD_VFN_V(0));
c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC_F |
FW_EQ_OFLD_CMD_EQSTART_F | FW_LEN16(c));
@@ -2726,7 +2746,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
FW_EQ_OFLD_CMD_EQSIZE_V(nentries));
c.eqaddr = cpu_to_be64(txq->q.phys_addr);
- ret = t4_wr_mbox(adap, adap->fn, &c, sizeof(c), &c);
+ ret = t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), &c);
if (ret) {
kfree(txq->q.sdesc);
txq->q.sdesc = NULL;
@@ -2765,7 +2785,7 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL;
- t4_iq_free(adap, adap->fn, adap->fn, 0, FW_IQ_TYPE_FL_INT_CAP,
+ t4_iq_free(adap, adap->mbox, adap->pf, 0, FW_IQ_TYPE_FL_INT_CAP,
rq->cntxt_id, fl_id, 0xffff);
dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
rq->desc, rq->phys_addr);
@@ -2820,7 +2840,7 @@ void t4_free_sge_resources(struct adapter *adap)
free_rspq_fl(adap, &eq->rspq,
eq->fl.size ? &eq->fl : NULL);
if (etq->q.desc) {
- t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
+ t4_eth_eq_free(adap, adap->mbox, adap->pf, 0,
etq->q.cntxt_id);
free_tx_desc(adap, &etq->q, etq->q.in_use, true);
kfree(etq->q.sdesc);
@@ -2839,7 +2859,7 @@ void t4_free_sge_resources(struct adapter *adap)
if (q->q.desc) {
tasklet_kill(&q->qresume_tsk);
- t4_ofld_eq_free(adap, adap->fn, adap->fn, 0,
+ t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
q->q.cntxt_id);
free_tx_desc(adap, &q->q, q->q.in_use, false);
kfree(q->q.sdesc);
@@ -2854,7 +2874,7 @@ void t4_free_sge_resources(struct adapter *adap)
if (cq->q.desc) {
tasklet_kill(&cq->qresume_tsk);
- t4_ctrl_eq_free(adap, adap->fn, adap->fn, 0,
+ t4_ctrl_eq_free(adap, adap->mbox, adap->pf, 0,
cq->q.cntxt_id);
__skb_queue_purge(&cq->sendq);
free_txq(adap, &cq->q);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 7b92f0f..c21ab26 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -150,7 +150,12 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
*/
void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
{
- u32 req = ENABLE_F | FUNCTION_V(adap->fn) | REGISTER_V(reg);
+ u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
+
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
+ req |= ENABLE_F;
+ else
+ req |= T6_ENABLE_F;
if (is_t4(adap->params.chip))
req |= LOCALCFG_F;
@@ -381,9 +386,8 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
/* Offset into the region of memory which is being accessed
* MEM_EDC0 = 0
* MEM_EDC1 = 1
- * MEM_MC = 2 -- T4
- * MEM_MC0 = 2 -- For T5
- * MEM_MC1 = 3 -- For T5
+ * MEM_MC = 2 -- for chips with only 1 memory controller
+ * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
*/
edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
if (mtype != MEM_MC1)
@@ -412,7 +416,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
if (is_t4(adap->params.chip))
mem_base -= adap->t4_bar0;
- win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->fn);
+ win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
/* Calculate our initial PCI-E Memory Window Position and Offset into
* that Window.
@@ -547,7 +551,7 @@ u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
ldst_cmd.u.pcie.ctrl_to_fn =
- (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->fn));
+ (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
ldst_cmd.u.pcie.r = reg;
/* If the LDST Command succeeds, return the result, otherwise
@@ -634,6 +638,7 @@ unsigned int t4_get_regs_len(struct adapter *adapter)
return T4_REGMAP_SIZE;
case CHELSIO_T5:
+ case CHELSIO_T6:
return T5_REGMAP_SIZE;
}
@@ -1316,6 +1321,344 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
0x51300, 0x51308,
};
+ static const unsigned int t6_reg_ranges[] = {
+ 0x1008, 0x114c,
+ 0x1180, 0x11b4,
+ 0x11fc, 0x1250,
+ 0x1280, 0x133c,
+ 0x1800, 0x18fc,
+ 0x3000, 0x302c,
+ 0x3060, 0x30d8,
+ 0x30e0, 0x30fc,
+ 0x3140, 0x357c,
+ 0x35a8, 0x35cc,
+ 0x35ec, 0x35ec,
+ 0x3600, 0x5624,
+ 0x56cc, 0x575c,
+ 0x580c, 0x5814,
+ 0x5890, 0x58bc,
+ 0x5940, 0x595c,
+ 0x5980, 0x598c,
+ 0x59b0, 0x59dc,
+ 0x59fc, 0x5a18,
+ 0x5a60, 0x5a6c,
+ 0x5a80, 0x5a9c,
+ 0x5b94, 0x5bfc,
+ 0x5c10, 0x5ec0,
+ 0x5ec8, 0x5ec8,
+ 0x6000, 0x6040,
+ 0x6058, 0x6154,
+ 0x7700, 0x7798,
+ 0x77c0, 0x7880,
+ 0x78cc, 0x78fc,
+ 0x7b00, 0x7c54,
+ 0x7d00, 0x7efc,
+ 0x8dc0, 0x8de0,
+ 0x8df8, 0x8e84,
+ 0x8ea0, 0x8f88,
+ 0x8fb8, 0x911c,
+ 0x9400, 0x9470,
+ 0x9600, 0x971c,
+ 0x9800, 0x9808,
+ 0x9820, 0x983c,
+ 0x9850, 0x9864,
+ 0x9c00, 0x9c6c,
+ 0x9c80, 0x9cec,
+ 0x9d00, 0x9d6c,
+ 0x9d80, 0x9dec,
+ 0x9e00, 0x9e6c,
+ 0x9e80, 0x9eec,
+ 0x9f00, 0x9f6c,
+ 0x9f80, 0xa020,
+ 0xd004, 0xd03c,
+ 0xdfc0, 0xdfe0,
+ 0xe000, 0xf008,
+ 0x11000, 0x11014,
+ 0x11048, 0x11110,
+ 0x11118, 0x1117c,
+ 0x11190, 0x11260,
+ 0x11300, 0x1130c,
+ 0x12000, 0x1205c,
+ 0x19040, 0x1906c,
+ 0x19078, 0x19080,
+ 0x1908c, 0x19124,
+ 0x19150, 0x191b0,
+ 0x191d0, 0x191e8,
+ 0x19238, 0x192b8,
+ 0x193f8, 0x19474,
+ 0x19490, 0x194cc,
+ 0x194f0, 0x194f8,
+ 0x19c00, 0x19c80,
+ 0x19c94, 0x19cbc,
+ 0x19ce4, 0x19d28,
+ 0x19d50, 0x19d78,
+ 0x19d94, 0x19dc8,
+ 0x19df0, 0x19e10,
+ 0x19e50, 0x19e6c,
+ 0x19ea0, 0x19f34,
+ 0x19f40, 0x19f50,
+ 0x19f90, 0x19fac,
+ 0x19fc4, 0x19fe4,
+ 0x1a000, 0x1a06c,
+ 0x1a0b0, 0x1a120,
+ 0x1a128, 0x1a138,
+ 0x1a190, 0x1a1c4,
+ 0x1a1fc, 0x1a1fc,
+ 0x1e008, 0x1e00c,
+ 0x1e040, 0x1e04c,
+ 0x1e284, 0x1e290,
+ 0x1e2c0, 0x1e2c0,
+ 0x1e2e0, 0x1e2e0,
+ 0x1e300, 0x1e384,
+ 0x1e3c0, 0x1e3c8,
+ 0x1e408, 0x1e40c,
+ 0x1e440, 0x1e44c,
+ 0x1e684, 0x1e690,
+ 0x1e6c0, 0x1e6c0,
+ 0x1e6e0, 0x1e6e0,
+ 0x1e700, 0x1e784,
+ 0x1e7c0, 0x1e7c8,
+ 0x1e808, 0x1e80c,
+ 0x1e840, 0x1e84c,
+ 0x1ea84, 0x1ea90,
+ 0x1eac0, 0x1eac0,
+ 0x1eae0, 0x1eae0,
+ 0x1eb00, 0x1eb84,
+ 0x1ebc0, 0x1ebc8,
+ 0x1ec08, 0x1ec0c,
+ 0x1ec40, 0x1ec4c,
+ 0x1ee84, 0x1ee90,
+ 0x1eec0, 0x1eec0,
+ 0x1eee0, 0x1eee0,
+ 0x1ef00, 0x1ef84,
+ 0x1efc0, 0x1efc8,
+ 0x1f008, 0x1f00c,
+ 0x1f040, 0x1f04c,
+ 0x1f284, 0x1f290,
+ 0x1f2c0, 0x1f2c0,
+ 0x1f2e0, 0x1f2e0,
+ 0x1f300, 0x1f384,
+ 0x1f3c0, 0x1f3c8,
+ 0x1f408, 0x1f40c,
+ 0x1f440, 0x1f44c,
+ 0x1f684, 0x1f690,
+ 0x1f6c0, 0x1f6c0,
+ 0x1f6e0, 0x1f6e0,
+ 0x1f700, 0x1f784,
+ 0x1f7c0, 0x1f7c8,
+ 0x1f808, 0x1f80c,
+ 0x1f840, 0x1f84c,
+ 0x1fa84, 0x1fa90,
+ 0x1fac0, 0x1fac0,
+ 0x1fae0, 0x1fae0,
+ 0x1fb00, 0x1fb84,
+ 0x1fbc0, 0x1fbc8,
+ 0x1fc08, 0x1fc0c,
+ 0x1fc40, 0x1fc4c,
+ 0x1fe84, 0x1fe90,
+ 0x1fec0, 0x1fec0,
+ 0x1fee0, 0x1fee0,
+ 0x1ff00, 0x1ff84,
+ 0x1ffc0, 0x1ffc8,
+ 0x30000, 0x30070,
+ 0x30100, 0x3015c,
+ 0x30190, 0x301d0,
+ 0x30200, 0x30318,
+ 0x30400, 0x3052c,
+ 0x30540, 0x3061c,
+ 0x30800, 0x3088c,
+ 0x308c0, 0x30908,
+ 0x30910, 0x309b8,
+ 0x30a00, 0x30a04,
+ 0x30a0c, 0x30a2c,
+ 0x30a44, 0x30a50,
+ 0x30a74, 0x30c24,
+ 0x30d00, 0x30d3c,
+ 0x30d44, 0x30d7c,
+ 0x30de0, 0x30de0,
+ 0x30e00, 0x30ed4,
+ 0x30f00, 0x30fa4,
+ 0x30fc0, 0x30fc4,
+ 0x31000, 0x31004,
+ 0x31080, 0x310fc,
+ 0x31208, 0x31220,
+ 0x3123c, 0x31254,
+ 0x31300, 0x31300,
+ 0x31308, 0x3131c,
+ 0x31338, 0x3133c,
+ 0x31380, 0x31380,
+ 0x31388, 0x313a8,
+ 0x313b4, 0x313b4,
+ 0x31400, 0x31420,
+ 0x31438, 0x3143c,
+ 0x31480, 0x31480,
+ 0x314a8, 0x314a8,
+ 0x314b0, 0x314b4,
+ 0x314c8, 0x314d4,
+ 0x31a40, 0x31a4c,
+ 0x31af0, 0x31b20,
+ 0x31b38, 0x31b3c,
+ 0x31b80, 0x31b80,
+ 0x31ba8, 0x31ba8,
+ 0x31bb0, 0x31bb4,
+ 0x31bc8, 0x31bd4,
+ 0x32140, 0x3218c,
+ 0x321f0, 0x32200,
+ 0x32218, 0x32218,
+ 0x32400, 0x32400,
+ 0x32408, 0x3241c,
+ 0x32618, 0x32620,
+ 0x32664, 0x32664,
+ 0x326a8, 0x326a8,
+ 0x326ec, 0x326ec,
+ 0x32a00, 0x32abc,
+ 0x32b00, 0x32b78,
+ 0x32c00, 0x32c00,
+ 0x32c08, 0x32c3c,
+ 0x32e00, 0x32e2c,
+ 0x32f00, 0x32f2c,
+ 0x33000, 0x330ac,
+ 0x330c0, 0x331ac,
+ 0x331c0, 0x332c4,
+ 0x332e4, 0x333c4,
+ 0x333e4, 0x334ac,
+ 0x334c0, 0x335ac,
+ 0x335c0, 0x336c4,
+ 0x336e4, 0x337c4,
+ 0x337e4, 0x337fc,
+ 0x33814, 0x33814,
+ 0x33854, 0x33868,
+ 0x33880, 0x3388c,
+ 0x338c0, 0x338d0,
+ 0x338e8, 0x338ec,
+ 0x33900, 0x339ac,
+ 0x339c0, 0x33ac4,
+ 0x33ae4, 0x33b10,
+ 0x33b24, 0x33b50,
+ 0x33bf0, 0x33c10,
+ 0x33c24, 0x33c50,
+ 0x33cf0, 0x33cfc,
+ 0x34000, 0x34070,
+ 0x34100, 0x3415c,
+ 0x34190, 0x341d0,
+ 0x34200, 0x34318,
+ 0x34400, 0x3452c,
+ 0x34540, 0x3461c,
+ 0x34800, 0x3488c,
+ 0x348c0, 0x34908,
+ 0x34910, 0x349b8,
+ 0x34a00, 0x34a04,
+ 0x34a0c, 0x34a2c,
+ 0x34a44, 0x34a50,
+ 0x34a74, 0x34c24,
+ 0x34d00, 0x34d3c,
+ 0x34d44, 0x34d7c,
+ 0x34de0, 0x34de0,
+ 0x34e00, 0x34ed4,
+ 0x34f00, 0x34fa4,
+ 0x34fc0, 0x34fc4,
+ 0x35000, 0x35004,
+ 0x35080, 0x350fc,
+ 0x35208, 0x35220,
+ 0x3523c, 0x35254,
+ 0x35300, 0x35300,
+ 0x35308, 0x3531c,
+ 0x35338, 0x3533c,
+ 0x35380, 0x35380,
+ 0x35388, 0x353a8,
+ 0x353b4, 0x353b4,
+ 0x35400, 0x35420,
+ 0x35438, 0x3543c,
+ 0x35480, 0x35480,
+ 0x354a8, 0x354a8,
+ 0x354b0, 0x354b4,
+ 0x354c8, 0x354d4,
+ 0x35a40, 0x35a4c,
+ 0x35af0, 0x35b20,
+ 0x35b38, 0x35b3c,
+ 0x35b80, 0x35b80,
+ 0x35ba8, 0x35ba8,
+ 0x35bb0, 0x35bb4,
+ 0x35bc8, 0x35bd4,
+ 0x36140, 0x3618c,
+ 0x361f0, 0x36200,
+ 0x36218, 0x36218,
+ 0x36400, 0x36400,
+ 0x36408, 0x3641c,
+ 0x36618, 0x36620,
+ 0x36664, 0x36664,
+ 0x366a8, 0x366a8,
+ 0x366ec, 0x366ec,
+ 0x36a00, 0x36abc,
+ 0x36b00, 0x36b78,
+ 0x36c00, 0x36c00,
+ 0x36c08, 0x36c3c,
+ 0x36e00, 0x36e2c,
+ 0x36f00, 0x36f2c,
+ 0x37000, 0x370ac,
+ 0x370c0, 0x371ac,
+ 0x371c0, 0x372c4,
+ 0x372e4, 0x373c4,
+ 0x373e4, 0x374ac,
+ 0x374c0, 0x375ac,
+ 0x375c0, 0x376c4,
+ 0x376e4, 0x377c4,
+ 0x377e4, 0x377fc,
+ 0x37814, 0x37814,
+ 0x37854, 0x37868,
+ 0x37880, 0x3788c,
+ 0x378c0, 0x378d0,
+ 0x378e8, 0x378ec,
+ 0x37900, 0x379ac,
+ 0x379c0, 0x37ac4,
+ 0x37ae4, 0x37b10,
+ 0x37b24, 0x37b50,
+ 0x37bf0, 0x37c10,
+ 0x37c24, 0x37c50,
+ 0x37cf0, 0x37cfc,
+ 0x40040, 0x40040,
+ 0x40080, 0x40084,
+ 0x40100, 0x40100,
+ 0x40140, 0x401bc,
+ 0x40200, 0x40214,
+ 0x40228, 0x40228,
+ 0x40240, 0x40258,
+ 0x40280, 0x40280,
+ 0x40304, 0x40304,
+ 0x40330, 0x4033c,
+ 0x41304, 0x413dc,
+ 0x41400, 0x4141c,
+ 0x41480, 0x414d0,
+ 0x44000, 0x4407c,
+ 0x440c0, 0x4427c,
+ 0x442c0, 0x4447c,
+ 0x444c0, 0x4467c,
+ 0x446c0, 0x4487c,
+ 0x448c0, 0x44a7c,
+ 0x44ac0, 0x44c7c,
+ 0x44cc0, 0x44e7c,
+ 0x44ec0, 0x4507c,
+ 0x450c0, 0x451fc,
+ 0x45800, 0x45868,
+ 0x45880, 0x45884,
+ 0x458a0, 0x458b0,
+ 0x45a00, 0x45a68,
+ 0x45a80, 0x45a84,
+ 0x45aa0, 0x45ab0,
+ 0x460c0, 0x460e4,
+ 0x47000, 0x4708c,
+ 0x47200, 0x47250,
+ 0x47400, 0x47420,
+ 0x47600, 0x47618,
+ 0x47800, 0x4782c,
+ 0x50000, 0x500cc,
+ 0x50400, 0x50400,
+ 0x50800, 0x508cc,
+ 0x50c00, 0x50c00,
+ 0x51000, 0x510b0,
+ 0x51300, 0x51324,
+ };
+
u32 *buf_end = (u32 *)((char *)buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
@@ -1335,6 +1678,11 @@ void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
break;
+ case CHELSIO_T6:
+ reg_ranges = t6_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+ break;
+
default:
dev_err(adap->pdev_dev,
"Unsupported chip version %d\n", chip_version);
@@ -1948,7 +2296,8 @@ static bool t4_fw_matches_chip(const struct adapter *adap,
* which will keep us "honest" in the future ...
*/
if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
- (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5))
+ (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
+ (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
return true;
dev_err(adap->pdev_dev,
@@ -2062,7 +2411,7 @@ int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
- ret = t4_query_params(adap, adap->mbox, adap->fn, 0, 1,
+ ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
&param, &val);
if (ret < 0)
return ret;
@@ -2134,7 +2483,7 @@ int t4_load_phy_fw(struct adapter *adap,
FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
val = phy_fw_size;
- ret = t4_query_params_rw(adap, adap->mbox, adap->fn, 0, 1,
+ ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
&param, &val, 1);
if (ret < 0)
return ret;
@@ -2163,7 +2512,7 @@ int t4_load_phy_fw(struct adapter *adap,
FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
- ret = t4_set_params_timeout(adap, adap->mbox, adap->fn, 0, 1,
+ ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
&param, &val, 30000);
/* If we have version number support, then check to see that the new
@@ -2199,7 +2548,7 @@ int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
c.op_to_vfn =
cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
- FW_PARAMS_CMD_PFN_V(adap->fn) |
+ FW_PARAMS_CMD_PFN_V(adap->pf) |
FW_PARAMS_CMD_VFN_V(0));
c.retval_len16 = cpu_to_be32(FW_LEN16(c));
c.param[0].mnem =
@@ -2488,6 +2837,7 @@ static void tp_intr_handler(struct adapter *adapter)
static void sge_intr_handler(struct adapter *adapter)
{
u64 v;
+ u32 err;
static const struct intr_info sge_intr_info[] = {
{ ERR_CPL_EXCEED_IQE_SIZE_F,
@@ -2496,8 +2846,6 @@ static void sge_intr_handler(struct adapter *adapter)
"SGE GTS CIDX increment too large", -1, 0 },
{ ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
{ DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
- { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
- { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
{ ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
"SGE IQID > 1023 received CPL for FL", -1, 0 },
{ ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
@@ -2510,13 +2858,19 @@ static void sge_intr_handler(struct adapter *adapter)
0 },
{ ERR_ING_CTXT_PRIO_F,
"SGE too many priority ingress contexts", -1, 0 },
- { ERR_EGR_CTXT_PRIO_F,
- "SGE too many priority egress contexts", -1, 0 },
{ INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
{ EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
{ 0 }
};
+ static struct intr_info t4t5_sge_intr_info[] = {
+ { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
+ { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
+ { ERR_EGR_CTXT_PRIO_F,
+ "SGE too many priority egress contexts", -1, 0 },
+ { 0 }
+ };
+
v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
if (v) {
@@ -2526,8 +2880,23 @@ static void sge_intr_handler(struct adapter *adapter)
t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
}
- if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info) ||
- v != 0)
+ v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
+ t4t5_sge_intr_info);
+
+ err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
+ if (err & ERROR_QID_VALID_F) {
+ dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
+ ERROR_QID_G(err));
+ if (err & UNCAPTURED_ERROR_F)
+ dev_err(adapter->pdev_dev,
+ "SGE UNCAPTURED_ERROR set (clearing)\n");
+ t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
+ UNCAPTURED_ERROR_F);
+ }
+
+ if (v != 0)
t4_fatal_err(adapter);
}
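
The new SGE_ERROR_STATS handling above decodes the register with the _S/_M/_G/_F macro family this patch adds to t4_regs.h. A minimal userspace sketch of that decode (the register value is made up for illustration):

#include <stdio.h>
#include <stdint.h>

/* Field macros as added to t4_regs.h */
#define ERROR_QID_S         0
#define ERROR_QID_M         0x1ffffU
#define ERROR_QID_G(x)      (((x) >> ERROR_QID_S) & ERROR_QID_M)
#define ERROR_QID_VALID_F   (1U << 17)
#define UNCAPTURED_ERROR_F  (1U << 18)

int main(void)
{
	uint32_t err = ERROR_QID_VALID_F | UNCAPTURED_ERROR_F | 0x2a;	/* made up */

	if (err & ERROR_QID_VALID_F)
		printf("SGE error for queue %u%s\n", ERROR_QID_G(err),
		       (err & UNCAPTURED_ERROR_F) ? " (uncaptured)" : "");
	return 0;
}
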
@@ -2700,6 +3069,7 @@ static void cplsw_intr_handler(struct adapter *adapter)
*/
static void le_intr_handler(struct adapter *adap)
{
+ enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
static const struct intr_info le_intr_info[] = {
{ LIPMISS_F, "LE LIP miss", -1, 0 },
{ LIP0_F, "LE 0 LIP error", -1, 0 },
@@ -2709,7 +3079,18 @@ static void le_intr_handler(struct adapter *adap)
{ 0 }
};
- if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A, le_intr_info))
+ static struct intr_info t6_le_intr_info[] = {
+ { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
+ { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
+ { TCAMINTPERR_F, "LE parity error", -1, 1 },
+ { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
+ { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
+ { 0 }
+ };
+
+ if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
+ (chip <= CHELSIO_T5) ?
+ le_intr_info : t6_le_intr_info))
t4_fatal_err(adap);
}
@@ -2978,7 +3359,7 @@ int t4_slow_intr_handler(struct adapter *adapter)
pcie_intr_handler(adapter);
if (cause & MC_F)
mem_intr_handler(adapter, MEM_MC);
- if (!is_t4(adapter->params.chip) && (cause & MC1_S))
+ if (is_t5(adapter->params.chip) && (cause & MC1_F))
mem_intr_handler(adapter, MEM_MC1);
if (cause & EDC0_F)
mem_intr_handler(adapter, MEM_EDC0);
@@ -3024,17 +3405,18 @@ int t4_slow_intr_handler(struct adapter *adapter)
*/
void t4_intr_enable(struct adapter *adapter)
{
+ u32 val = 0;
u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A));
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
- ERR_DROPPED_DB_F | ERR_DATA_CPL_ON_HIGH_QID1_F |
+ ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
- ERR_EGR_CTXT_PRIO_F | INGRESS_SIZE_ERR_F |
- DBFIFO_HP_INT_F | DBFIFO_LP_INT_F |
- EGRESS_SIZE_ERR_F);
+ DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
}
@@ -3248,11 +3630,29 @@ void t4_read_rss_key(struct adapter *adap, u32 *key)
*/
void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx)
{
+ u8 rss_key_addr_cnt = 16;
+ u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
+
+	/* T6 and later: for KeyMode 3 (per-VF and per-VF scramble),
+	 * key addresses 16-63 become reachable as well, with KeyWrAddrX
+	 * supplying index[5:4] (the upper two bits) of the key-table index
+ */
+ if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
+ (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
+ rss_key_addr_cnt = 32;
+
t4_write_indirect(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, key, 10,
TP_RSS_SECRET_KEY0_A);
- if (idx >= 0 && idx < 16)
- t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
- KEYWRADDR_V(idx) | KEYWREN_F);
+
+ if (idx >= 0 && idx < rss_key_addr_cnt) {
+ if (rss_key_addr_cnt > 16)
+ t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+ KEYWRADDRX_V(idx >> 4) |
+ T6_VFWRADDR_V(idx) | KEYWREN_F);
+ else
+ t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
+ KEYWRADDR_V(idx) | KEYWREN_F);
+ }
}
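
A standalone sketch of the index split used above: with the extended table enabled, KeyWrAddrX carries index[5:4] while the full index still goes out in the T6 VFWRADDR field (loop values chosen arbitrarily):

#include <stdio.h>

int main(void)
{
	for (int idx = 0; idx < 64; idx += 17)
		printf("idx %2d -> KeyWrAddrX %d, VFWRADDR %2d\n",
		       idx, idx >> 4, idx);
	return 0;
}
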
/**
@@ -3286,8 +3686,13 @@ void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
{
u32 vrt, mask, data;
- mask = VFWRADDR_V(VFWRADDR_M);
- data = VFWRADDR_V(index);
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
+ mask = VFWRADDR_V(VFWRADDR_M);
+ data = VFWRADDR_V(index);
+ } else {
+ mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
+ data = T6_VFWRADDR_V(index);
+ }
/* Request that the index'th VF Table values be read into VFL/VFH.
*/
@@ -3355,18 +3760,18 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
if (v4) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST_A);
- v4->tcpOutRsts = STAT(OUT_RST);
- v4->tcpInSegs = STAT64(IN_SEG);
- v4->tcpOutSegs = STAT64(OUT_SEG);
- v4->tcpRetransSegs = STAT64(RXT_SEG);
+ v4->tcp_out_rsts = STAT(OUT_RST);
+ v4->tcp_in_segs = STAT64(IN_SEG);
+ v4->tcp_out_segs = STAT64(OUT_SEG);
+ v4->tcp_retrans_segs = STAT64(RXT_SEG);
}
if (v6) {
t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST_A);
- v6->tcpOutRsts = STAT(OUT_RST);
- v6->tcpInSegs = STAT64(IN_SEG);
- v6->tcpOutSegs = STAT64(OUT_SEG);
- v6->tcpRetransSegs = STAT64(RXT_SEG);
+ v6->tcp_out_rsts = STAT(OUT_RST);
+ v6->tcp_in_segs = STAT64(IN_SEG);
+ v6->tcp_out_segs = STAT64(OUT_SEG);
+ v6->tcp_retrans_segs = STAT64(RXT_SEG);
}
#undef STAT64
#undef STAT
@@ -3374,6 +3779,130 @@ void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
}
/**
+ * t4_tp_get_err_stats - read TP's error MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's error counters.
+ */
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
+{
+ /* T6 and later has 2 channels */
+	/* T6 and later have 2 channels; earlier chips have NCHAN (4) */
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->mac_in_errs, 12, TP_MIB_MAC_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_cong_drops, 8,
+ TP_MIB_TNL_CNG_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_tx_drops, 4,
+ TP_MIB_TNL_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_vlan_drops, 4,
+ TP_MIB_OFD_VLN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp6_in_errs, 4,
+ TP_MIB_TCP_V6IN_ERR_0_A);
+ } else {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->mac_in_errs, 2, TP_MIB_MAC_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->hdr_in_errs, 2, TP_MIB_HDR_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp_in_errs, 2, TP_MIB_TCP_IN_ERR_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_cong_drops, 2,
+ TP_MIB_TNL_CNG_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_chan_drops, 2,
+ TP_MIB_OFD_CHN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tnl_tx_drops, 2, TP_MIB_TNL_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->ofld_vlan_drops, 2,
+ TP_MIB_OFD_VLN_DROP_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ st->tcp6_in_errs, 2, TP_MIB_TCP_V6IN_ERR_0_A);
+ }
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A,
+ &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A);
+}
+
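
On the 4-channel path above, the single 12-word read starting at TP_MIB_MAC_IN_ERR_0 (0x0) sweeps three adjacent 4-entry MIB blocks in one go -- MAC_IN_ERR at 0x0, HDR_IN_ERR at 0x4, TCP_IN_ERR at 0x8 per the t4_regs.h offsets -- which assumes struct tp_err_stats lays those arrays out back to back. A small sketch of that word-to-field mapping:

#include <stdio.h>

int main(void)
{
	static const char *field[] = {
		"mac_in_errs", "hdr_in_errs", "tcp_in_errs"
	};

	for (int w = 0; w < 12; w++)	/* one 12-word indirect read */
		printf("word %2d -> %s[%d]\n", w, field[w / 4], w % 4);
	return 0;
}
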
+/**
+ * t4_tp_get_cpl_stats - read TP's CPL MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's CPL counters.
+ */
+void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st)
+{
+	/* T6 and later have 2 channels; earlier chips have NCHAN (4) */
+ if (adap->params.arch.nchan == NCHAN) {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+ 8, TP_MIB_CPL_IN_REQ_0_A);
+ } else {
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->req,
+ 2, TP_MIB_CPL_IN_REQ_0_A);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, st->rsp,
+ 2, TP_MIB_CPL_OUT_RSP_0_A);
+ }
+}
+
+/**
+ * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's RDMA counters.
+ */
+void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st)
+{
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->rqe_dfr_pkt,
+ 2, TP_MIB_RQE_DFR_PKT_A);
+}
+
+/**
+ * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
+ * @adap: the adapter
+ * @idx: the port index
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's FCoE counters for the selected port.
+ */
+void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
+ struct tp_fcoe_stats *st)
+{
+ u32 val[2];
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_ddp,
+ 1, TP_MIB_FCOE_DDP_0_A + idx);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, &st->frames_drop,
+ 1, TP_MIB_FCOE_DROP_0_A + idx);
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val,
+ 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx);
+ st->octets_ddp = ((u64)val[0] << 32) | val[1];
+}
+
+/**
+ * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
+ * @adap: the adapter
+ * @st: holds the counter values
+ *
+ * Returns the values of TP's counters for non-TCP directly-placed packets.
+ */
+void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st)
+{
+ u32 val[4];
+
+ t4_read_indirect(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, val, 4,
+ TP_MIB_USM_PKTS_A);
+ st->frames = val[0];
+ st->drops = val[1];
+ st->octets = ((u64)val[2] << 32) | val[3];
+}
+
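
The 64-bit MIB counters in these helpers arrive as hi/lo 32-bit word pairs and are recombined, as octets above shows. A trivial sketch of the recombination (values invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t hi = 0x1, lo = 0xdeadbeef;	/* invented MIB words */
	uint64_t octets = ((uint64_t)hi << 32) | lo;

	printf("octets = 0x%llx\n", (unsigned long long)octets);
	return 0;
}
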
+/**
* t4_read_mtu_tbl - returns the values in the HW path MTU table
* @adap: the adapter
* @mtus: where to store the MTU values
@@ -3630,6 +4159,28 @@ const char *t4_get_port_type_description(enum fw_port_type port_type)
}
/**
+ * t4_get_port_stats_offset - collect port stats relative to a previous
+ * snapshot
+ * @adap: The adapter
+ * @idx: The port
+ * @stats: Current stats to fill
+ * @offset: Previous stats snapshot
+ */
+void t4_get_port_stats_offset(struct adapter *adap, int idx,
+ struct port_stats *stats,
+ struct port_stats *offset)
+{
+ u64 *s, *o;
+ int i;
+
+ t4_get_port_stats(adap, idx, stats);
+ for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
+ i < (sizeof(struct port_stats) / sizeof(u64));
+ i++, s++, o++)
+ *s -= *o;
+}
+
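
t4_get_port_stats_offset() relies on struct port_stats consisting entirely of u64 counters, so it can walk the struct as a flat u64 array and subtract the snapshot member for member. A self-contained sketch under the same assumption (struct demo_stats is a made-up stand-in):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct demo_stats {	/* made-up stand-in for struct port_stats */
	uint64_t tx_frames;
	uint64_t rx_frames;
};

static void stats_offset(struct demo_stats *cur, const struct demo_stats *base)
{
	uint64_t *s = (uint64_t *)cur;
	const uint64_t *o = (const uint64_t *)base;

	for (size_t i = 0; i < sizeof(*cur) / sizeof(uint64_t); i++)
		s[i] -= o[i];	/* subtract the snapshot member-wise */
}

int main(void)
{
	struct demo_stats base = { 100, 50 }, cur = { 160, 75 };

	stats_offset(&cur, &base);
	printf("delta: tx %llu, rx %llu\n",
	       (unsigned long long)cur.tx_frames,
	       (unsigned long long)cur.rx_frames);
	return 0;
}
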
+/**
* t4_get_port_stats - collect port statistics
* @adap: the adapter
* @idx: the port index
@@ -3713,103 +4264,51 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
}
/**
- * t4_wol_magic_enable - enable/disable magic packet WoL
+ * t4_get_lb_stats - collect loopback port statistics
* @adap: the adapter
- * @port: the physical port index
- * @addr: MAC address expected in magic packets, %NULL to disable
- *
- * Enables/disables magic packet wake-on-LAN for the selected port.
- */
-void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
- const u8 *addr)
-{
- u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
-
- if (is_t4(adap->params.chip)) {
- mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
- mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
- port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
- } else {
- mag_id_reg_l = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_LO);
- mag_id_reg_h = T5_PORT_REG(port, MAC_PORT_MAGIC_MACID_HI);
- port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
- }
-
- if (addr) {
- t4_write_reg(adap, mag_id_reg_l,
- (addr[2] << 24) | (addr[3] << 16) |
- (addr[4] << 8) | addr[5]);
- t4_write_reg(adap, mag_id_reg_h,
- (addr[0] << 8) | addr[1]);
- }
- t4_set_reg_field(adap, port_cfg_reg, MAGICEN_F,
- addr ? MAGICEN_F : 0);
-}
-
-/**
- * t4_wol_pat_enable - enable/disable pattern-based WoL
- * @adap: the adapter
- * @port: the physical port index
- * @map: bitmap of which HW pattern filters to set
- * @mask0: byte mask for bytes 0-63 of a packet
- * @mask1: byte mask for bytes 64-127 of a packet
- * @crc: Ethernet CRC for selected bytes
- * @enable: enable/disable switch
+ * @idx: the loopback port index
+ * @p: the stats structure to fill
*
- * Sets the pattern filters indicated in @map to mask out the bytes
- * specified in @mask0/@mask1 in received packets and compare the CRC of
- * the resulting packet against @crc. If @enable is %true pattern-based
- * WoL is enabled, otherwise disabled.
+ * Returns HW statistics for the given loopback port.
*/
-int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
- u64 mask0, u64 mask1, unsigned int crc, bool enable)
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
{
- int i;
- u32 port_cfg_reg;
-
- if (is_t4(adap->params.chip))
- port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2_A);
- else
- port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2_A);
-
- if (!enable) {
- t4_set_reg_field(adap, port_cfg_reg, PATEN_F, 0);
- return 0;
- }
- if (map > 0xff)
- return -EINVAL;
+ u32 bgmap = t4_get_mps_bg_map(adap, idx);
-#define EPIO_REG(name) \
+#define GET_STAT(name) \
+ t4_read_reg64(adap, \
(is_t4(adap->params.chip) ? \
- PORT_REG(port, XGMAC_PORT_EPIO_##name##_A) : \
- T5_PORT_REG(port, MAC_PORT_EPIO_##name##_A))
-
- t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
- t4_write_reg(adap, EPIO_REG(DATA2), mask1);
- t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
-
- for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
- if (!(map & 1))
- continue;
+ PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
+ T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
+#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
- /* write byte masks */
- t4_write_reg(adap, EPIO_REG(DATA0), mask0);
- t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i) | EPIOWR_F);
- t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
- return -ETIMEDOUT;
-
- /* write CRC */
- t4_write_reg(adap, EPIO_REG(DATA0), crc);
- t4_write_reg(adap, EPIO_REG(OP), ADDRESS_V(i + 32) | EPIOWR_F);
- t4_read_reg(adap, EPIO_REG(OP)); /* flush */
- if (t4_read_reg(adap, EPIO_REG(OP)) & SF_BUSY_F)
- return -ETIMEDOUT;
- }
-#undef EPIO_REG
+ p->octets = GET_STAT(BYTES);
+ p->frames = GET_STAT(FRAMES);
+ p->bcast_frames = GET_STAT(BCAST);
+ p->mcast_frames = GET_STAT(MCAST);
+ p->ucast_frames = GET_STAT(UCAST);
+ p->error_frames = GET_STAT(ERROR);
+
+ p->frames_64 = GET_STAT(64B);
+ p->frames_65_127 = GET_STAT(65B_127B);
+ p->frames_128_255 = GET_STAT(128B_255B);
+ p->frames_256_511 = GET_STAT(256B_511B);
+ p->frames_512_1023 = GET_STAT(512B_1023B);
+ p->frames_1024_1518 = GET_STAT(1024B_1518B);
+ p->frames_1519_max = GET_STAT(1519B_MAX);
+ p->drop = GET_STAT(DROP_FRAMES);
+
+ p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
+ p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
+ p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
+ p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
+ p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
+ p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
+ p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
+ p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
- t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2_A), 0, PATEN_F);
- return 0;
+#undef GET_STAT
+#undef GET_STAT_COM
}
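
GET_STAT() above builds register names by token pasting, so GET_STAT(BYTES) expands to a read of MPS_PORT_STAT_LB_PORT_BYTES_L on the right register block for the chip. The ##-pasting mechanics in miniature (plain integers stand in for register reads):

#include <stdio.h>

#define STAT_BYTES_L  1234	/* stand-ins for register reads */
#define STAT_FRAMES_L 56

#define GET_STAT(name) (STAT_##name##_L)	/* paste name into the token */

int main(void)
{
	printf("bytes=%d frames=%d\n", GET_STAT(BYTES), GET_STAT(FRAMES));
	return 0;
}
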
/* t4_mk_filtdelwr - create a delete filter WR
@@ -4798,45 +5297,71 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
unsigned int viid, bool free, unsigned int naddr,
const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
{
- int i, ret;
+ int offset, ret = 0;
struct fw_vi_mac_cmd c;
- struct fw_vi_mac_exact *p;
- unsigned int max_naddr = is_t4(adap->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int nfilters = 0;
+ unsigned int max_naddr = adap->params.arch.mps_tcam_size;
+ unsigned int rem = naddr;
- if (naddr > 7)
+ if (naddr > max_naddr)
return -EINVAL;
- memset(&c, 0, sizeof(c));
- c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
- FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
- (free ? FW_CMD_EXEC_F : 0) |
- FW_VI_MAC_CMD_VIID_V(viid));
- c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
- FW_CMD_LEN16_V((naddr + 2) / 2));
-
- for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
- p->valid_to_idx =
- cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
- FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
- memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
- }
+	for (offset = 0; offset < naddr; /**/) {
+ unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
+ rem : ARRAY_SIZE(c.u.exact));
+ size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
+ u.exact[fw_naddr]), 16);
+ struct fw_vi_mac_exact *p;
+ int i;
- ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
- if (ret)
- return ret;
+ memset(&c, 0, sizeof(c));
+ c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+ FW_CMD_REQUEST_F |
+ FW_CMD_WRITE_F |
+ FW_CMD_EXEC_V(free) |
+ FW_VI_MAC_CMD_VIID_V(viid));
+ c.freemacs_to_len16 =
+ cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
+ FW_CMD_LEN16_V(len16));
+
+ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+ p->valid_to_idx =
+ cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+ FW_VI_MAC_CMD_IDX_V(
+ FW_VI_MAC_ADD_MAC));
+ memcpy(p->macaddr, addr[offset + i],
+ sizeof(p->macaddr));
+ }
+
+ /* It's okay if we run out of space in our MAC address arena.
+		 * Some of the addresses we submit may get stored, so we need
+		 * to run through the reply to see what the results were.
+ */
+ ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
+ if (ret && ret != -FW_ENOMEM)
+ break;
- for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
- u16 index = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
+ for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
+ u16 index = FW_VI_MAC_CMD_IDX_G(
+ be16_to_cpu(p->valid_to_idx));
+
+ if (idx)
+ idx[offset + i] = (index >= max_naddr ?
+ 0xffff : index);
+ if (index < max_naddr)
+ nfilters++;
+ else if (hash)
+ *hash |= (1ULL <<
+ hash_mac_addr(addr[offset + i]));
+ }
- if (idx)
- idx[i] = index >= max_naddr ? 0xffff : index;
- if (index < max_naddr)
- ret++;
- else if (hash)
- *hash |= (1ULL << hash_mac_addr(addr[i]));
+ free = false;
+ offset += fw_naddr;
+ rem -= fw_naddr;
}
+
+ if (ret == 0 || ret == -FW_ENOMEM)
+ ret = nfilters;
return ret;
}
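
The rewritten t4_alloc_mac_filt() no longer caps a request at 7 addresses; it walks the list in firmware-command-sized chunks, advancing offset and retiring rem as each command completes. The chunking skeleton on its own (CHUNK stands in for ARRAY_SIZE(c.u.exact)):

#include <stdio.h>

#define CHUNK 7U	/* stand-in for ARRAY_SIZE(c.u.exact) */

int main(void)
{
	unsigned int naddr = 20, rem = naddr, offset = 0;

	while (offset < naddr) {
		unsigned int fw_naddr = rem < CHUNK ? rem : CHUNK;

		/* one firmware command would cover this slice */
		printf("cmd covers addr[%u..%u]\n", offset, offset + fw_naddr - 1);
		offset += fw_naddr;
		rem -= fw_naddr;
	}
	return 0;
}
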
@@ -4865,9 +5390,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int ret, mode;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p = c.u.exact;
- unsigned int max_mac_addr = is_t4(adap->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
if (idx < 0) /* new allocation */
idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
@@ -5276,9 +5799,30 @@ int t4_prep_adapter(struct adapter *adapter)
switch (ver) {
case CHELSIO_T4:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
+ adapter->params.arch.sge_fl_db = DBPRIO_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 128;
+ adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.vfcount = 128;
break;
case CHELSIO_T5:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
+ adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 128;
+ adapter->params.arch.nchan = NCHAN;
+ adapter->params.arch.vfcount = 128;
+ break;
+ case CHELSIO_T6:
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ adapter->params.arch.mps_rplc_size = 256;
+ adapter->params.arch.nchan = 2;
+ adapter->params.arch.vfcount = 256;
break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
@@ -5299,7 +5843,7 @@ int t4_prep_adapter(struct adapter *adapter)
}
/**
- * cxgb4_t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
* @qtype: the Ingress or Egress type for @qid
@@ -5323,7 +5867,7 @@ int t4_prep_adapter(struct adapter *adapter)
* Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" registers may not be used.
*/
-int cxgb4_t4_bar2_sge_qregs(struct adapter *adapter,
+int t4_bar2_sge_qregs(struct adapter *adapter,
unsigned int qid,
enum t4_bar2_qtype qtype,
u64 *pbar2_qoffset,
@@ -5457,13 +6001,13 @@ int t4_init_sge_params(struct adapter *adapter)
*/
hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
s_hps = (HOSTPAGESIZEPF0_S +
- (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->fn);
+ (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
 /* Extract the SGE Egress and Ingress Queues Per Page for our PF.
*/
s_qpp = (QUEUESPERPAGEPF0_S +
- (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->fn);
+ (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 88067d9..f9a2cb1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -52,8 +52,6 @@ enum {
MBOX_LEN = 64, /* mailbox size in bytes */
TRACE_LEN = 112, /* length of trace data and mask */
FILTER_OPT_LEN = 36, /* filter tuple width for optional components */
- NWOL_PAT = 8, /* # of WoL patterns */
- WOL_PAT_LEN = 128, /* length of WoL patterns */
};
enum {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index d90f8a0..132cb8f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -686,6 +686,9 @@ struct cpl_tx_pkt {
#define TXPKT_ETHHDR_LEN_S 34
#define TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << TXPKT_ETHHDR_LEN_S)
+#define T6_TXPKT_ETHHDR_LEN_S 32
+#define T6_TXPKT_ETHHDR_LEN_V(x) ((__u64)(x) << T6_TXPKT_ETHHDR_LEN_S)
+
#define TXPKT_CSUM_TYPE_S 40
#define TXPKT_CSUM_TYPE_V(x) ((__u64)(x) << TXPKT_CSUM_TYPE_S)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 326674b..af3462d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -418,6 +418,20 @@
#define SGE_INGRESS_QUEUES_PER_PAGE_PF_A 0x10f4
#define SGE_INGRESS_QUEUES_PER_PAGE_VF_A 0x10f8
+#define SGE_ERROR_STATS_A 0x1100
+
+#define UNCAPTURED_ERROR_S 18
+#define UNCAPTURED_ERROR_V(x) ((x) << UNCAPTURED_ERROR_S)
+#define UNCAPTURED_ERROR_F UNCAPTURED_ERROR_V(1U)
+
+#define ERROR_QID_VALID_S 17
+#define ERROR_QID_VALID_V(x) ((x) << ERROR_QID_VALID_S)
+#define ERROR_QID_VALID_F ERROR_QID_VALID_V(1U)
+
+#define ERROR_QID_S 0
+#define ERROR_QID_M 0x1ffffU
+#define ERROR_QID_G(x) (((x) >> ERROR_QID_S) & ERROR_QID_M)
+
#define HP_INT_THRESH_S 28
#define HP_INT_THRESH_M 0xfU
#define HP_INT_THRESH_V(x) ((x) << HP_INT_THRESH_S)
@@ -448,8 +462,13 @@
#define SGE_STAT_MATCH_A 0x10e8
#define SGE_STAT_CFG_A 0x10ec
+#define STATMODE_S 2
+#define STATMODE_V(x) ((x) << STATMODE_S)
+
#define STATSOURCE_T5_S 9
+#define STATSOURCE_T5_M 0xfU
#define STATSOURCE_T5_V(x) ((x) << STATSOURCE_T5_S)
+#define STATSOURCE_T5_G(x) (((x) >> STATSOURCE_T5_S) & STATSOURCE_T5_M)
#define SGE_DBFIFO_STATUS2_A 0x1118
@@ -705,6 +724,10 @@
#define REGISTER_S 0
#define REGISTER_V(x) ((x) << REGISTER_S)
+#define T6_ENABLE_S 31
+#define T6_ENABLE_V(x) ((x) << T6_ENABLE_S)
+#define T6_ENABLE_F T6_ENABLE_V(1U)
+
#define PFNUM_S 0
#define PFNUM_V(x) ((x) << PFNUM_S)
@@ -1399,6 +1422,8 @@
#define CSUM_HAS_PSEUDO_HDR_F CSUM_HAS_PSEUDO_HDR_V(1U)
#define TP_MIB_MAC_IN_ERR_0_A 0x0
+#define TP_MIB_HDR_IN_ERR_0_A 0x4
+#define TP_MIB_TCP_IN_ERR_0_A 0x8
#define TP_MIB_TCP_OUT_RST_A 0xc
#define TP_MIB_TCP_IN_SEG_HI_A 0x10
#define TP_MIB_TCP_IN_SEG_LO_A 0x11
@@ -1407,11 +1432,19 @@
#define TP_MIB_TCP_RXT_SEG_HI_A 0x14
#define TP_MIB_TCP_RXT_SEG_LO_A 0x15
#define TP_MIB_TNL_CNG_DROP_0_A 0x18
+#define TP_MIB_OFD_CHN_DROP_0_A 0x1c
#define TP_MIB_TCP_V6IN_ERR_0_A 0x28
#define TP_MIB_TCP_V6OUT_RST_A 0x2c
#define TP_MIB_OFD_ARP_DROP_A 0x36
+#define TP_MIB_CPL_IN_REQ_0_A 0x38
+#define TP_MIB_CPL_OUT_RSP_0_A 0x3c
#define TP_MIB_TNL_DROP_0_A 0x44
+#define TP_MIB_FCOE_DDP_0_A 0x48
+#define TP_MIB_FCOE_DROP_0_A 0x4c
+#define TP_MIB_FCOE_BYTE_0_HI_A 0x50
#define TP_MIB_OFD_VLN_DROP_0_A 0x58
+#define TP_MIB_USM_PKTS_A 0x5c
+#define TP_MIB_RQE_DFR_PKT_A 0x64
#define ULP_TX_INT_CAUSE_A 0x8dcc
@@ -1572,6 +1605,7 @@
#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES_L 0x528
#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
@@ -2054,6 +2088,11 @@
#define VFLKPIDX_M 0xffU
#define VFLKPIDX_G(x) (((x) >> VFLKPIDX_S) & VFLKPIDX_M)
+#define T6_VFWRADDR_S 8
+#define T6_VFWRADDR_M 0xffU
+#define T6_VFWRADDR_V(x) ((x) << T6_VFWRADDR_S)
+#define T6_VFWRADDR_G(x) (((x) >> T6_VFWRADDR_S) & T6_VFWRADDR_M)
+
#define TP_RSS_CONFIG_CNG_A 0x7e04
#define TP_RSS_SECRET_KEY0_A 0x40
#define TP_RSS_PF0_CONFIG_A 0x30
@@ -2175,7 +2214,28 @@
#define MPS_RX_PERR_INT_CAUSE_A 0x11074
#define MPS_CLS_TCAM_Y_L_A 0xf000
+#define MPS_CLS_TCAM_DATA0_A 0xf000
+#define MPS_CLS_TCAM_DATA1_A 0xf004
+
+#define DMACH_S 0
+#define DMACH_M 0xffffU
+#define DMACH_G(x) (((x) >> DMACH_S) & DMACH_M)
+
#define MPS_CLS_TCAM_X_L_A 0xf008
+#define MPS_CLS_TCAM_DATA2_CTL_A 0xf008
+
+#define CTLCMDTYPE_S 31
+#define CTLCMDTYPE_V(x) ((x) << CTLCMDTYPE_S)
+#define CTLCMDTYPE_F CTLCMDTYPE_V(1U)
+
+#define CTLTCAMSEL_S 25
+#define CTLTCAMSEL_V(x) ((x) << CTLTCAMSEL_S)
+
+#define CTLTCAMINDEX_S 17
+#define CTLTCAMINDEX_V(x) ((x) << CTLTCAMINDEX_S)
+
+#define CTLXYBITSEL_S 16
+#define CTLXYBITSEL_V(x) ((x) << CTLXYBITSEL_S)
#define MPS_CLS_TCAM_Y_L(idx) (MPS_CLS_TCAM_Y_L_A + (idx) * 16)
#define NUM_MPS_CLS_TCAM_Y_L_INSTANCES 512
@@ -2184,6 +2244,45 @@
#define NUM_MPS_CLS_TCAM_X_L_INSTANCES 512
#define MPS_CLS_SRAM_L_A 0xe000
+
+#define T6_MULTILISTEN0_S 26
+
+#define T6_SRAM_PRIO3_S 23
+#define T6_SRAM_PRIO3_M 0x7U
+#define T6_SRAM_PRIO3_G(x) (((x) >> T6_SRAM_PRIO3_S) & T6_SRAM_PRIO3_M)
+
+#define T6_SRAM_PRIO2_S 20
+#define T6_SRAM_PRIO2_M 0x7U
+#define T6_SRAM_PRIO2_G(x) (((x) >> T6_SRAM_PRIO2_S) & T6_SRAM_PRIO2_M)
+
+#define T6_SRAM_PRIO1_S 17
+#define T6_SRAM_PRIO1_M 0x7U
+#define T6_SRAM_PRIO1_G(x) (((x) >> T6_SRAM_PRIO1_S) & T6_SRAM_PRIO1_M)
+
+#define T6_SRAM_PRIO0_S 14
+#define T6_SRAM_PRIO0_M 0x7U
+#define T6_SRAM_PRIO0_G(x) (((x) >> T6_SRAM_PRIO0_S) & T6_SRAM_PRIO0_M)
+
+#define T6_SRAM_VLD_S 13
+#define T6_SRAM_VLD_V(x) ((x) << T6_SRAM_VLD_S)
+#define T6_SRAM_VLD_F T6_SRAM_VLD_V(1U)
+
+#define T6_REPLICATE_S 12
+#define T6_REPLICATE_V(x) ((x) << T6_REPLICATE_S)
+#define T6_REPLICATE_F T6_REPLICATE_V(1U)
+
+#define T6_PF_S 9
+#define T6_PF_M 0x7U
+#define T6_PF_G(x) (((x) >> T6_PF_S) & T6_PF_M)
+
+#define T6_VF_VALID_S 8
+#define T6_VF_VALID_V(x) ((x) << T6_VF_VALID_S)
+#define T6_VF_VALID_F T6_VF_VALID_V(1U)
+
+#define T6_VF_S 0
+#define T6_VF_M 0xffU
+#define T6_VF_G(x) (((x) >> T6_VF_S) & T6_VF_M)
+
#define MPS_CLS_SRAM_H_A 0xe004
#define MPS_CLS_SRAM_L(idx) (MPS_CLS_SRAM_L_A + (idx) * 8)
@@ -2433,6 +2532,8 @@
#define CIM_F CIM_V(1U)
#define MC1_S 31
+#define MC1_V(x) ((x) << MC1_S)
+#define MC1_F MC1_V(1U)
#define PL_INT_ENABLE_A 0x19410
#define PL_INT_MAP0_A 0x19414
@@ -2463,6 +2564,18 @@
#define REV_V(x) ((x) << REV_S)
#define REV_G(x) (((x) >> REV_S) & REV_M)
+#define T6_UNKNOWNCMD_S 3
+#define T6_UNKNOWNCMD_V(x) ((x) << T6_UNKNOWNCMD_S)
+#define T6_UNKNOWNCMD_F T6_UNKNOWNCMD_V(1U)
+
+#define T6_LIP0_S 2
+#define T6_LIP0_V(x) ((x) << T6_LIP0_S)
+#define T6_LIP0_F T6_LIP0_V(1U)
+
+#define T6_LIPMISS_S 1
+#define T6_LIPMISS_V(x) ((x) << T6_LIPMISS_S)
+#define T6_LIPMISS_F T6_LIPMISS_V(1U)
+
#define LE_DB_INT_CAUSE_A 0x19c3c
#define REQQPARERR_S 16
@@ -2485,6 +2598,14 @@
#define LIP0_V(x) ((x) << LIP0_S)
#define LIP0_F LIP0_V(1U)
+#define TCAMINTPERR_S 13
+#define TCAMINTPERR_V(x) ((x) << TCAMINTPERR_S)
+#define TCAMINTPERR_F TCAMINTPERR_V(1U)
+
+#define SSRAMINTPERR_S 10
+#define SSRAMINTPERR_V(x) ((x) << SSRAMINTPERR_S)
+#define SSRAMINTPERR_F SSRAMINTPERR_V(1U)
+
#define NCSI_INT_CAUSE_A 0x1a0d8
#define CIM_DM_PRTY_ERR_S 8
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
index 72ec1f9..7bdee3b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_values.h
@@ -63,6 +63,7 @@
#define FETCHBURSTMIN_64B_X 2
+#define FETCHBURSTMAX_256B_X 2
#define FETCHBURSTMAX_512B_X 3
#define HOSTFCMODE_STATUS_PAGE_X 2
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 0848317..aceb1e8 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -788,15 +788,27 @@ struct fw_ldst_cmd {
__be16 vctl;
__be16 rval;
} mdio;
- struct fw_ldst_mps {
- __be16 fid_ctl;
- __be16 rplcpf_pkd;
- __be32 rplc127_96;
- __be32 rplc95_64;
- __be32 rplc63_32;
- __be32 rplc31_0;
- __be32 atrb;
- __be16 vlan[16];
+ union fw_ldst_mps {
+ struct fw_ldst_mps_rplc {
+ __be16 fid_idx;
+ __be16 rplcpf_pkd;
+ __be32 rplc255_224;
+ __be32 rplc223_192;
+ __be32 rplc191_160;
+ __be32 rplc159_128;
+ __be32 rplc127_96;
+ __be32 rplc95_64;
+ __be32 rplc63_32;
+ __be32 rplc31_0;
+ } rplc;
+ struct fw_ldst_mps_atrb {
+ __be16 fid_mpsid;
+ __be16 r2[3];
+ __be32 r3[2];
+ __be32 r4;
+ __be32 atrb;
+ __be16 vlan[16];
+ } atrb;
} mps;
struct fw_ldst_func {
u8 access_ctl;
@@ -831,8 +843,8 @@ struct fw_ldst_cmd {
#define FW_LDST_CMD_FID_S 15
#define FW_LDST_CMD_FID_V(x) ((x) << FW_LDST_CMD_FID_S)
-#define FW_LDST_CMD_CTL_S 0
-#define FW_LDST_CMD_CTL_V(x) ((x) << FW_LDST_CMD_CTL_S)
+#define FW_LDST_CMD_IDX_S 0
+#define FW_LDST_CMD_IDX_V(x) ((x) << FW_LDST_CMD_IDX_S)
#define FW_LDST_CMD_RPLCPF_S 0
#define FW_LDST_CMD_RPLCPF_V(x) ((x) << FW_LDST_CMD_RPLCPF_S)
@@ -2536,13 +2548,8 @@ enum fw_port_mod_sub_type {
FW_PORT_MOD_SUB_TYPE_TWINAX_7 = 0xC,
};
-/* port stats */
-#define FW_NUM_PORT_STATS 50
-#define FW_NUM_PORT_TX_STATS 23
-#define FW_NUM_PORT_RX_STATS 27
-
enum fw_port_stats_tx_index {
- FW_STAT_TX_PORT_BYTES_IX,
+ FW_STAT_TX_PORT_BYTES_IX = 0,
FW_STAT_TX_PORT_FRAMES_IX,
FW_STAT_TX_PORT_BCAST_IX,
FW_STAT_TX_PORT_MCAST_IX,
@@ -2564,11 +2571,12 @@ enum fw_port_stats_tx_index {
FW_STAT_TX_PORT_PPP4_IX,
FW_STAT_TX_PORT_PPP5_IX,
FW_STAT_TX_PORT_PPP6_IX,
- FW_STAT_TX_PORT_PPP7_IX
+ FW_STAT_TX_PORT_PPP7_IX,
+ FW_NUM_PORT_TX_STATS
};
enum fw_port_stat_rx_index {
- FW_STAT_RX_PORT_BYTES_IX,
+ FW_STAT_RX_PORT_BYTES_IX = 0,
FW_STAT_RX_PORT_FRAMES_IX,
FW_STAT_RX_PORT_BCAST_IX,
FW_STAT_RX_PORT_MCAST_IX,
@@ -2594,9 +2602,14 @@ enum fw_port_stat_rx_index {
FW_STAT_RX_PORT_PPP5_IX,
FW_STAT_RX_PORT_PPP6_IX,
FW_STAT_RX_PORT_PPP7_IX,
- FW_STAT_RX_PORT_LESS_64B_IX
+ FW_STAT_RX_PORT_LESS_64B_IX,
+ FW_STAT_RX_PORT_MAC_ERROR_IX,
+ FW_NUM_PORT_RX_STATS
};
+/* port stats */
+#define FW_NUM_PORT_STATS (FW_NUM_PORT_TX_STATS + FW_NUM_PORT_RX_STATS)
+
struct fw_port_stats_cmd {
__be32 op_to_portid;
__be32 retval_len16;
@@ -3025,7 +3038,8 @@ struct fw_hdr {
enum fw_hdr_chip {
FW_HDR_CHIP_T4,
- FW_HDR_CHIP_T5
+ FW_HDR_CHIP_T5,
+ FW_HDR_CHIP_T6
};
#define FW_HDR_FW_VER_MAJOR_S 24
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index b9d1cba..32b2135 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -45,4 +45,9 @@
#define T5FW_VERSION_MICRO 0x20
#define T5FW_VERSION_BUILD 0x00
+#define T6FW_VERSION_MAJOR 0x01
+#define T6FW_VERSION_MINOR 0x0D
+#define T6FW_VERSION_MICRO 0x2D
+#define T6FW_VERSION_BUILD 0x00
+
#endif
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 2e41d15..ad53e5a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -524,7 +524,7 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
*/
static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
{
- u32 val;
+ u32 val = adapter->params.arch.sge_fl_db;
/* The SGE keeps track of its Producer and Consumer Indices in terms
* of Egress Queue Units so we can only tell it about integral numbers
@@ -532,11 +532,9 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
if (is_t4(adapter->params.chip))
- val = PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
+ val |= PIDX_V(fl->pend_cred / FL_PER_EQ_UNIT);
else
- val = PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT) |
- DBTYPE_F;
- val |= DBPRIO_F;
+ val |= PIDX_T5_V(fl->pend_cred / FL_PER_EQ_UNIT);
/* Make sure all memory writes to the Free List queue are
* committed before we tell the hardware about them.
@@ -1084,7 +1082,7 @@ static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *tq,
* Figure out what HW csum a packet wants and return the appropriate control
* bits.
*/
-static u64 hwcsum(const struct sk_buff *skb)
+static u64 hwcsum(enum chip_type chip, const struct sk_buff *skb)
{
int csum_type;
const struct iphdr *iph = ip_hdr(skb);
@@ -1116,11 +1114,16 @@ nocsum:
goto nocsum;
}
- if (likely(csum_type >= TX_CSUM_TCPIP))
- return TXPKT_CSUM_TYPE_V(csum_type) |
- TXPKT_IPHDR_LEN_V(skb_network_header_len(skb)) |
- TXPKT_ETHHDR_LEN_V(skb_network_offset(skb) - ETH_HLEN);
- else {
+ if (likely(csum_type >= TX_CSUM_TCPIP)) {
+ u64 hdr_len = TXPKT_IPHDR_LEN_V(skb_network_header_len(skb));
+ int eth_hdr_len = skb_network_offset(skb) - ETH_HLEN;
+
+ if (chip <= CHELSIO_T5)
+ hdr_len |= TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ else
+ hdr_len |= T6_TXPKT_ETHHDR_LEN_V(eth_hdr_len);
+ return TXPKT_CSUM_TYPE_V(csum_type) | hdr_len;
+ } else {
int start = skb_transport_offset(skb);
return TXPKT_CSUM_TYPE_V(csum_type) |
@@ -1308,10 +1311,15 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
* accounting.
*/
cpl = (void *)(lso + 1);
- cntrl = (TXPKT_CSUM_TYPE_V(v6 ?
+
+ if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
+ cntrl = TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+ else
+ cntrl = T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len);
+
+ cntrl |= TXPKT_CSUM_TYPE_V(v6 ?
TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
- TXPKT_IPHDR_LEN_V(l3hdr_len) |
- TXPKT_ETHHDR_LEN_V(eth_xtra_len));
+ TXPKT_IPHDR_LEN_V(l3hdr_len);
txq->tso++;
txq->tx_cso += ssi->gso_segs;
} else {
@@ -1328,7 +1336,8 @@ int t4vf_eth_xmit(struct sk_buff *skb, struct net_device *dev)
*/
cpl = (void *)(wr + 1);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
- cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS_F;
+ cntrl = hwcsum(adapter->params.chip, skb) |
+ TXPKT_IPCSUM_DIS_F;
txq->tx_cso++;
} else
cntrl = TXPKT_L4CSUM_DIS_F | TXPKT_IPCSUM_DIS_F;
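
The chip check feeding cntrl exists because the Ethernet-header-length field in the TX packet CPL moved from bit 34 on T4/T5 to bit 32 on T6, per the t4_msg.h hunk above. A sketch of the two encodings side by side (the eth_xtra_len value is invented):

#include <stdio.h>
#include <stdint.h>

#define TXPKT_ETHHDR_LEN_V(x)    ((uint64_t)(x) << 34)	/* T4/T5 position */
#define T6_TXPKT_ETHHDR_LEN_V(x) ((uint64_t)(x) << 32)	/* T6 position */

int main(void)
{
	int eth_xtra_len = 4;	/* invented: e.g. one VLAN tag past ETH_HLEN */

	printf("T5: 0x%llx  T6: 0x%llx\n",
	       (unsigned long long)TXPKT_ETHHDR_LEN_V(eth_xtra_len),
	       (unsigned long long)T6_TXPKT_ETHHDR_LEN_V(eth_xtra_len));
	return 0;
}
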
@@ -2162,8 +2171,8 @@ static void __iomem *bar2_address(struct adapter *adapter,
u64 bar2_qoffset;
int ret;
- ret = t4_bar2_sge_qregs(adapter, qid, qtype,
- &bar2_qoffset, pbar2_qid);
+ ret = t4vf_bar2_sge_qregs(adapter, qid, qtype,
+ &bar2_qoffset, pbar2_qid);
if (ret)
return NULL;
@@ -2247,6 +2256,8 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.iqaddr = cpu_to_be64(rspq->phys_addr);
if (fl) {
+ enum chip_type chip =
+ CHELSIO_CHIP_VERSION(adapter->params.chip);
/*
* Allocate the ring for the hardware free list (with space
* for its status page) along with the associated software
@@ -2286,7 +2297,9 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
cmd.fl0dcaen_to_fl0cidxfthresh =
cpu_to_be16(
FW_IQ_CMD_FL0FBMIN_V(SGE_FETCHBURSTMIN_64B) |
- FW_IQ_CMD_FL0FBMAX_V(SGE_FETCHBURSTMAX_512B));
+ FW_IQ_CMD_FL0FBMAX_V((chip <= CHELSIO_T5) ?
+ FETCHBURSTMAX_512B_X :
+ FETCHBURSTMAX_256B_X));
cmd.fl0size = cpu_to_be16(flsz);
cmd.fl0addr = cpu_to_be64(fl->addr);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index b9debb4..88b8981b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -51,6 +51,7 @@
*/
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
+#define CHELSIO_T6 0x6
enum chip_type {
T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
@@ -156,6 +157,12 @@ struct vpd_params {
u32 cclk; /* Core Clock (KHz) */
};
+/* Stores chip specific parameters */
+struct arch_specific_params {
+ u32 sge_fl_db;
+ u16 mps_tcam_size;
+};
+
/*
* Global Receive Side Scaling (RSS) parameters in host-native format.
*/
@@ -215,6 +222,7 @@ struct adapter_params {
struct vpd_params vpd; /* Vital Product Data */
struct rss_params rss; /* Receive Side Scaling */
struct vf_resources vfres; /* Virtual Function Resource limits */
+ struct arch_specific_params arch; /* chip specific params */
enum chip_type chip; /* chip code */
u8 nports; /* # of Ethernet "ports" */
};
@@ -284,11 +292,11 @@ int t4vf_fw_reset(struct adapter *);
int t4vf_set_params(struct adapter *, unsigned int, const u32 *, const u32 *);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
-int t4_bar2_sge_qregs(struct adapter *adapter,
- unsigned int qid,
- enum t4_bar2_qtype qtype,
- u64 *pbar2_qoffset,
- unsigned int *pbar2_qid);
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid);
int t4vf_get_sge_params(struct adapter *);
int t4vf_get_vpd_params(struct adapter *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 966ee90..0db6dc9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -428,7 +428,7 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
}
/**
- * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
+ * t4vf_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
* @qtype: the Ingress or Egress type for @qid
@@ -452,11 +452,11 @@ int t4vf_set_params(struct adapter *adapter, unsigned int nparams,
* Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
 * then these "Inferred Queue ID" registers may not be used.
*/
-int t4_bar2_sge_qregs(struct adapter *adapter,
- unsigned int qid,
- enum t4_bar2_qtype qtype,
- u64 *pbar2_qoffset,
- unsigned int *pbar2_qid)
+int t4vf_bar2_sge_qregs(struct adapter *adapter,
+ unsigned int qid,
+ enum t4_bar2_qtype qtype,
+ u64 *pbar2_qoffset,
+ unsigned int *pbar2_qid)
{
unsigned int page_shift, page_size, qpp_shift, qpp_mask;
u64 bar2_page_offset, bar2_qoffset;
@@ -1191,9 +1191,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned nfilters = 0;
unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
- unsigned int max_naddr = is_t4(adapter->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int max_naddr = adapter->params.arch.mps_tcam_size;
if (naddr > max_naddr)
return -EINVAL;
@@ -1285,9 +1283,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
struct fw_vi_mac_exact *p = &cmd.u.exact[0];
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[1]), 16);
- unsigned int max_naddr = is_t4(adapter->params.chip) ?
- NUM_MPS_CLS_SRAM_L_INSTANCES :
- NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ unsigned int max_mac_addr = adapter->params.arch.mps_tcam_size;
/*
* If this is a new allocation, determine whether it should be
@@ -1310,7 +1306,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
if (ret == 0) {
p = &rpl.u.exact[0];
ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
- if (ret >= max_naddr)
+ if (ret >= max_mac_addr)
ret = -ENOMEM;
}
return ret;
@@ -1590,11 +1586,25 @@ int t4vf_prep_adapter(struct adapter *adapter)
switch (CHELSIO_PCI_ID_VER(adapter->pdev->device)) {
case CHELSIO_T4:
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+ adapter->params.arch.sge_fl_db = DBPRIO_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_CLS_SRAM_L_INSTANCES;
break;
case CHELSIO_T5:
chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
+ adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
+ break;
+
+ case CHELSIO_T6:
+ chipid = REV_G(t4_read_reg(adapter, PL_VF_REV_A));
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, chipid);
+ adapter->params.arch.sge_fl_db = 0;
+ adapter->params.arch.mps_tcam_size =
+ NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
break;
}
diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
index badff18..8966f31 100644
--- a/drivers/net/ethernet/dec/tulip/de4x5.c
+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
@@ -5189,16 +5189,16 @@ de4x5_parse_params(struct net_device *dev)
if (strstr(p, "fdx") || strstr(p, "FDX")) lp->params.fdx = true;
if (strstr(p, "autosense") || strstr(p, "AUTOSENSE")) {
- if (strstr(p, "TP")) {
- lp->params.autosense = TP;
- } else if (strstr(p, "TP_NW")) {
+ if (strstr(p, "TP_NW")) {
lp->params.autosense = TP_NW;
+ } else if (strstr(p, "TP")) {
+ lp->params.autosense = TP;
+ } else if (strstr(p, "BNC_AUI")) {
+ lp->params.autosense = BNC;
} else if (strstr(p, "BNC")) {
lp->params.autosense = BNC;
} else if (strstr(p, "AUI")) {
lp->params.autosense = AUI;
- } else if (strstr(p, "BNC_AUI")) {
- lp->params.autosense = BNC;
} else if (strstr(p, "10Mb")) {
lp->params.autosense = _10Mb;
} else if (strstr(p, "100Mb")) {
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index 2c30c0c..447d092 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -1115,7 +1115,7 @@ static void uli526x_timer(unsigned long data)
netif_carrier_off(dev);
}
}
- db->init=0;
+ db->init = 0;
/* Timer active again */
db->timer.expires = ULI526X_TIMER_WUT;
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 1274b6f..cf0a5fc 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -463,10 +463,8 @@ rio_open (struct net_device *dev)
dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
}
- init_timer (&np->timer);
+ setup_timer(&np->timer, rio_timer, (unsigned long)dev);
np->timer.expires = jiffies + 1*HZ;
- np->timer.data = (unsigned long) dev;
- np->timer.function = rio_timer;
add_timer (&np->timer);
/* Start Tx/Rx */
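
setup_timer() collapses the old three-step init_timer()/.data/.function sequence into one call, as this hunk and the e100 hunk below show. A userspace imitation of what the macro does (the struct and macro here are stand-ins, not the kernel's definitions):

#include <stdio.h>

struct timer_list {	/* stand-in, not the kernel's definition */
	void (*function)(unsigned long);
	unsigned long data;
};

#define setup_timer(t, fn, d) \
	do { (t)->function = (fn); (t)->data = (d); } while (0)

static void rio_timer_demo(unsigned long data)
{
	printf("timer fired, data=%lu\n", data);
}

int main(void)
{
	struct timer_list t;

	setup_timer(&t, rio_timer_demo, 42UL);
	t.function(t.data);	/* simulate expiry */
	return 0;
}
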
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 75696d4..1365a56 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2593,11 +2593,11 @@ static int be_evt_queues_create(struct be_adapter *adapter)
adapter->cfg_num_qs);
for_all_evt_queues(adapter, eqo, i) {
+ int numa_node = dev_to_node(&adapter->pdev->dev);
if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
return -ENOMEM;
- cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
- eqo->affinity_mask);
-
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ eqo->affinity_mask);
netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
BE_NAPI_WEIGHT);
napi_hash_add(&eqo->napi);
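cpumask_local_spread(i, node) returns the i-th CPU counting CPUs on the given NUMA node first, then all others; it replaces the removed cpumask_set_cpu_local_first() call above. A sketch of the pattern (set_queue_affinity() is a hypothetical helper; the caller is assumed to have allocated the mask):

#include <linux/cpumask.h>
#include <linux/device.h>

static void set_queue_affinity(struct device *dev, unsigned int i,
			       struct cpumask *mask)
{
	int node = dev_to_node(dev);	/* NUMA node of the PCI device */

	cpumask_clear(mask);
	cpumask_set_cpu(cpumask_local_spread(i, node), mask);
}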
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index de79193..b9df0cb 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2084,12 +2084,8 @@ static void emac_ethtool_get_pauseparam(struct net_device *ndev,
static int emac_get_regs_len(struct emac_instance *dev)
{
- if (emac_has_feature(dev, EMAC_FTR_EMAC4))
- return sizeof(struct emac_ethtool_regs_subhdr) +
- EMAC4_ETHTOOL_REGS_SIZE(dev);
- else
return sizeof(struct emac_ethtool_regs_subhdr) +
- EMAC_ETHTOOL_REGS_SIZE(dev);
+ sizeof(struct emac_regs);
}
static int emac_ethtool_get_regs_len(struct net_device *ndev)
@@ -2114,15 +2110,15 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf)
struct emac_ethtool_regs_subhdr *hdr = buf;
hdr->index = dev->cell_index;
- if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
+ if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
+ hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
+ } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
hdr->version = EMAC4_ETHTOOL_REGS_VER;
- memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
- return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
} else {
hdr->version = EMAC_ETHTOOL_REGS_VER;
- memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
- return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
}
+ memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
+ return (void *)(hdr + 1) + sizeof(struct emac_regs);
}
static void emac_ethtool_get_regs(struct net_device *ndev,
diff --git a/drivers/net/ethernet/ibm/emac/core.h b/drivers/net/ethernet/ibm/emac/core.h
index 67f342a..28df374 100644
--- a/drivers/net/ethernet/ibm/emac/core.h
+++ b/drivers/net/ethernet/ibm/emac/core.h
@@ -461,10 +461,7 @@ struct emac_ethtool_regs_subhdr {
};
#define EMAC_ETHTOOL_REGS_VER 0
-#define EMAC_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
- (dev)->rsrc_regs.start + 1)
-#define EMAC4_ETHTOOL_REGS_VER 1
-#define EMAC4_ETHTOOL_REGS_SIZE(dev) ((dev)->rsrc_regs.end - \
- (dev)->rsrc_regs.start + 1)
+#define EMAC4_ETHTOOL_REGS_VER 1
+#define EMAC4SYNC_ETHTOOL_REGS_VER 2
#endif /* __IBM_NEWEMAC_CORE_H */
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 35357ae..d2657a4 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2922,9 +2922,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_master(pdev);
- init_timer(&nic->watchdog);
- nic->watchdog.function = e100_watchdog;
- nic->watchdog.data = (unsigned long)nic;
+ setup_timer(&nic->watchdog, e100_watchdog, (unsigned long)nic);
INIT_WORK(&nic->tx_timeout_task, e100_tx_timeout_task);
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 08f22f3..2af603f 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.h b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
index 535a943..a2162e1 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.h
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index 32e7775..5f70164 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/82571.h b/drivers/net/ethernet/intel/e1000e/82571.h
index 2e758f7..abc6a9a 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.h
+++ b/drivers/net/ethernet/intel/e1000e/82571.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h
index 0570c66..133d407 100644
--- a/drivers/net/ethernet/intel/e1000e/defines.h
+++ b/drivers/net/ethernet/intel/e1000e/defines.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 0abc942..0b748d1 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -98,6 +98,8 @@ struct e1000_info;
#define DEFAULT_RADV 8
#define BURST_RDTR 0x20
#define BURST_RADV 0x20
+#define PCICFG_DESC_RING_STATUS 0xe4
+#define FLUSH_DESC_REQUIRED 0x100
/* in the case of WTHRESH, it appears at least the 82571/2 hardware
* writes back 4 descriptors when WTHRESH=5, and 3 descriptors when
@@ -384,6 +386,10 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
#define INCVALUE_SHIFT_25MHz 18
#define INCPERIOD_25MHz 1
+#define INCVALUE_24MHz 125
+#define INCVALUE_SHIFT_24MHz 14
+#define INCPERIOD_24MHz 3
+
/* Another drawback of scaling the incvalue by a large factor is the
* 64-bit SYSTIM register overflows more quickly. This is dealt with
* by simply reading the clock before it overflows.
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index 11f486e..ad6daa6 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -1516,8 +1516,19 @@ static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
-
+ u32 rctl, fext_nvm11, tarc0;
+
+ if (hw->mac.type == e1000_pch_spt) {
+ fext_nvm11 = er32(FEXTNVM11);
+ fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
+ ew32(FEXTNVM11, fext_nvm11);
+ tarc0 = er32(TARC(0));
+ /* clear bits 28 & 29 (control of MULR concurrent requests) */
+ tarc0 &= 0xcfffffff;
+ /* set bit 29 (value of MULR requests is now 2) */
+ tarc0 |= 0x20000000;
+ ew32(TARC(0), tarc0);
+ }
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
switch (hw->mac.type) {
@@ -1542,7 +1553,7 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
- u32 rctl;
+ u32 rctl, fext_nvm11, tarc0;
u16 phy_reg;
rctl = er32(RCTL);
@@ -1550,6 +1561,16 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
ew32(RCTL, rctl);
switch (hw->mac.type) {
+ case e1000_pch_spt:
+ fext_nvm11 = er32(FEXTNVM11);
+ fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
+ ew32(FEXTNVM11, fext_nvm11);
+ tarc0 = er32(TARC(0));
+ /* clear bits 28 & 29 (control of MULR concurrent requests),
+ * returning the MULR request count to its default
+ */
+ tarc0 &= 0xcfffffff;
+ ew32(TARC(0), tarc0);
+ /* fall through */
case e1000_80003es2lan:
if (hw->phy.media_type == e1000_media_type_fiber ||
hw->phy.media_type == e1000_media_type_internal_serdes) {
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 19e8c48..c9da465 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index e18443a..b074b9a 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -1014,7 +1014,6 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
u16 speed, duplex, scale = 0;
u16 max_snoop, max_nosnoop;
u16 max_ltr_enc; /* max LTR latency encoded */
- s64 lat_ns; /* latency (ns) */
u64 value;
u32 rxa;
@@ -1040,14 +1039,10 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link)
* 2^25*(2^10-1) ns. The scale is encoded as 0=2^0ns,
* 1=2^5ns, 2=2^10ns,...5=2^25ns.
*/
- lat_ns = ((s64)rxa * 1024 -
- (2 * (s64)hw->adapter->max_frame_size)) * 8 * 1000;
- if (lat_ns < 0) {
- value = 0;
- } else {
- value = lat_ns;
- do_div(value, speed);
- }
+ rxa *= 512;
+ value = (rxa > hw->adapter->max_frame_size) ?
+ (rxa - hw->adapter->max_frame_size) * (16000 / speed) :
+ 0;
while (value > PCI_LTR_VALUE_MASK) {
scale++;
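The rewrite above is an algebraic regrouping, not a behavior change: (rxa * 1024 - 2 * max_frame) * 8 * 1000 / speed equals (rxa * 512 - max_frame) * (16000 / speed), and pulling the factor of two out keeps the intermediate unsigned and avoids do_div(). A user-space sketch of the scale loop that follows, assuming the PCIe LTR format of a 10-bit value whose unit grows 32x per scale step (a truncating shift is shown; the driver may round up):

#include <stdint.h>
#include <stdio.h>

#define LTR_VALUE_MASK	0x3ffu	/* 10-bit latency value field */

static uint16_t ltr_encode(uint64_t lat_ns)
{
	uint16_t scale = 0;

	while (lat_ns > LTR_VALUE_MASK) {
		scale++;	/* each step multiplies the unit by 32 */
		lat_ns >>= 5;
	}
	return (uint16_t)((scale << 10) | lat_ns);
}

int main(void)
{
	/* rxa = 32 (KB units): (32 * 512 - 1518) * (16000 / 1000) ns */
	printf("%#06x\n", ltr_encode((32 * 512 - 1518) * (16000 / 1000)));
	return 0;
}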
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h
index 770a573..2645985 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.h
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -98,8 +98,15 @@
#define E1000_FEXTNVM6_K1_OFF_ENABLE 0x80000000
/* bit for disabling packet buffer read */
#define E1000_FEXTNVM7_DISABLE_PB_READ 0x00040000
-
+#define E1000_FEXTNVM7_SIDE_CLK_UNGATE 0x00000004
#define E1000_FEXTNVM7_DISABLE_SMB_PERST 0x00000020
+#define E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS 0x00000800
+#define E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS 0x00001000
+#define E1000_FEXTNVM11_DISABLE_PB_READ 0x00000200
+#define E1000_FEXTNVM11_DISABLE_MULR_FIX 0x00002000
+
+/* bit24: RXDCTL thresholds granularity: 0 - cache lines, 1 - descriptors */
+#define E1000_RXDCTL_THRESH_UNIT_DESC 0x01000000
#define K1_ENTRY_LATENCY 0
#define K1_MIN_TIME 1
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 30b74d5..e59d7c2 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index 0513d90..8284618 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/manage.c b/drivers/net/ethernet/intel/e1000e/manage.c
index 06edfca..cc9b3be 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.c
+++ b/drivers/net/ethernet/intel/e1000e/manage.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/manage.h b/drivers/net/ethernet/intel/e1000e/manage.h
index a8c27f9..0b9ea59 100644
--- a/drivers/net/ethernet/intel/e1000e/manage.h
+++ b/drivers/net/ethernet/intel/e1000e/manage.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 7dd2c11..e62b9dc 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -48,7 +48,7 @@
#define DRV_EXTRAVERSION "-k"
-#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
+#define DRV_VERSION "3.2.5" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
@@ -3525,22 +3525,30 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
switch (hw->mac.type) {
case e1000_pch2lan:
case e1000_pch_lpt:
- case e1000_pch_spt:
- /* On I217, I218 and I219, the clock frequency is 25MHz
- * or 96MHz as indicated by the System Clock Frequency
- * Indication
- */
- if (((hw->mac.type != e1000_pch_lpt) &&
- (hw->mac.type != e1000_pch_spt)) ||
- (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
/* Stable 96MHz frequency */
incperiod = INCPERIOD_96MHz;
incvalue = INCVALUE_96MHz;
shift = INCVALUE_SHIFT_96MHz;
adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
+ } else {
+ /* Stable 25MHz frequency */
+ incperiod = INCPERIOD_25MHz;
+ incvalue = INCVALUE_25MHz;
+ shift = INCVALUE_SHIFT_25MHz;
+ adapter->cc.shift = shift;
+ }
+ break;
+ case e1000_pch_spt:
+ if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
+ /* Stable 24MHz frequency */
+ incperiod = INCPERIOD_24MHz;
+ incvalue = INCVALUE_24MHz;
+ shift = INCVALUE_SHIFT_24MHz;
+ adapter->cc.shift = shift;
break;
}
- /* fall-through */
+ return -EINVAL;
case e1000_82574:
case e1000_82583:
/* Stable 25MHz frequency */
@@ -3788,6 +3796,108 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
}
/**
+ * e1000_flush_tx_ring - remove all descriptors from the tx_ring
+ *
+ * We want to clear all pending descriptors from the TX ring.
+ * Zeroing happens when the HW reads the regs. We assign the ring itself
+ * as the data of the next descriptor; we don't care about the data since
+ * we are about to reset the HW.
+ */
+static void e1000_flush_tx_ring(struct e1000_adapter *adapter)
+{
+ struct e1000_hw *hw = &adapter->hw;
+ struct e1000_ring *tx_ring = adapter->tx_ring;
+ struct e1000_tx_desc *tx_desc = NULL;
+ u32 tdt, tctl, txd_lower = E1000_TXD_CMD_IFCS;
+ u16 size = 512;
+
+ tctl = er32(TCTL);
+ ew32(TCTL, tctl | E1000_TCTL_EN);
+ tdt = er32(TDT(0));
+ BUG_ON(tdt != tx_ring->next_to_use);
+ tx_desc = E1000_TX_DESC(*tx_ring, tx_ring->next_to_use);
+ tx_desc->buffer_addr = tx_ring->dma;
+
+ tx_desc->lower.data = cpu_to_le32(txd_lower | size);
+ tx_desc->upper.data = 0;
+ /* flush descriptors to memory before notifying the HW */
+ wmb();
+ tx_ring->next_to_use++;
+ if (tx_ring->next_to_use == tx_ring->count)
+ tx_ring->next_to_use = 0;
+ ew32(TDT(0), tx_ring->next_to_use);
+ mmiowb();
+ usleep_range(200, 250);
+}
+
+/**
+ * e1000_flush_rx_ring - remove all descriptors from the rx_ring
+ *
+ * Mark all descriptors in the RX ring as consumed and disable the RX ring.
+ */
+static void e1000_flush_rx_ring(struct e1000_adapter *adapter)
+{
+ u32 rctl, rxdctl;
+ struct e1000_hw *hw = &adapter->hw;
+
+ rctl = er32(RCTL);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+ e1e_flush();
+ usleep_range(100, 150);
+
+ rxdctl = er32(RXDCTL(0));
+ /* zero the lower 14 bits (prefetch and host thresholds) */
+ rxdctl &= 0xffffc000;
+
+ /* update thresholds: prefetch threshold to 31, host threshold to 1
+ * and make sure the granularity is "descriptors" and not "cache lines"
+ */
+ rxdctl |= (0x1F | (1 << 8) | E1000_RXDCTL_THRESH_UNIT_DESC);
+
+ ew32(RXDCTL(0), rxdctl);
+ /* momentarily enable the RX ring for the changes to take effect */
+ ew32(RCTL, rctl | E1000_RCTL_EN);
+ e1e_flush();
+ usleep_range(100, 150);
+ ew32(RCTL, rctl & ~E1000_RCTL_EN);
+}
+
+/**
+ * e1000_flush_desc_rings - remove all descriptors from the descriptor rings
+ *
+ * In i219, the descriptor rings must be emptied before resetting the HW
+ * or before changing the device state to D3 during runtime (runtime PM).
+ *
+ * Failure to do this will cause the HW to enter a unit hang state which
+ * can only be released by a PCI reset of the device.
+ *
+ */
+
+static void e1000_flush_desc_rings(struct e1000_adapter *adapter)
+{
+ u16 hang_state;
+ u32 fext_nvm11, tdlen;
+ struct e1000_hw *hw = &adapter->hw;
+
+ /* First, disable MULR fix in FEXTNVM11 */
+ fext_nvm11 = er32(FEXTNVM11);
+ fext_nvm11 |= E1000_FEXTNVM11_DISABLE_MULR_FIX;
+ ew32(FEXTNVM11, fext_nvm11);
+ /* do nothing if we're not in a faulty state, or if the queue is empty */
+ tdlen = er32(TDLEN(0));
+ pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
+ &hang_state);
+ if (!(hang_state & FLUSH_DESC_REQUIRED) || !tdlen)
+ return;
+ e1000_flush_tx_ring(adapter);
+ /* recheck, maybe the fault is caused by the rx ring */
+ pci_read_config_word(adapter->pdev, PCICFG_DESC_RING_STATUS,
+ &hang_state);
+ if (hang_state & FLUSH_DESC_REQUIRED)
+ e1000_flush_rx_ring(adapter);
+}
+
+/**
* e1000e_reset - bring the hardware into a known good state
*
* This function boots the hardware and enables some settings that
@@ -3943,6 +4053,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
}
}
+ if (hw->mac.type == e1000_pch_spt)
+ e1000_flush_desc_rings(adapter);
/* Allow time for pending master requests to run */
mac->ops.reset_hw(hw);
@@ -4016,6 +4128,20 @@ void e1000e_reset(struct e1000_adapter *adapter)
phy_data &= ~IGP02E1000_PM_SPD;
e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
}
+ if (hw->mac.type == e1000_pch_spt && adapter->int_mode == 0) {
+ u32 reg;
+
+ /* Fextnvm7 @ 0xe4[2] = 1 */
+ reg = er32(FEXTNVM7);
+ reg |= E1000_FEXTNVM7_SIDE_CLK_UNGATE;
+ ew32(FEXTNVM7, reg);
+ /* Fextnvm9 @ 0x5bb4[13:12] = 11 */
+ reg = er32(FEXTNVM9);
+ reg |= E1000_FEXTNVM9_IOSFSB_CLKGATE_DIS |
+ E1000_FEXTNVM9_IOSFSB_CLKREQ_DIS;
+ ew32(FEXTNVM9, reg);
+ }
+
}
int e1000e_up(struct e1000_adapter *adapter)
@@ -4115,8 +4241,6 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
spin_unlock(&adapter->stats64_lock);
e1000e_flush_descriptors(adapter);
- e1000_clean_tx_ring(adapter->tx_ring);
- e1000_clean_rx_ring(adapter->rx_ring);
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -4127,8 +4251,14 @@ void e1000e_down(struct e1000_adapter *adapter, bool reset)
e1000_lv_jumbo_workaround_ich8lan(hw, false))
e_dbg("failed to disable jumbo frame workaround mode\n");
- if (reset && !pci_channel_offline(adapter->pdev))
- e1000e_reset(adapter);
+ if (!pci_channel_offline(adapter->pdev)) {
+ if (reset)
+ e1000e_reset(adapter);
+ else if (hw->mac.type == e1000_pch_spt)
+ e1000_flush_desc_rings(adapter);
+ }
+ e1000_clean_tx_ring(adapter->tx_ring);
+ e1000_clean_rx_ring(adapter->rx_ring);
}
void e1000e_reinit_locked(struct e1000_adapter *adapter)
@@ -4151,9 +4281,16 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
cc);
struct e1000_hw *hw = &adapter->hw;
cycle_t systim, systim_next;
+ /* SYSTIMH latching upon SYSTIML read does not work well. To fix that,
+ * we must not allow an overflow of SYSTIML and a change to SYSTIMH to
+ * occur between the two reads, so if we read a value close to overflow,
+ * we wait for the overflow to occur and read both registers when it's
+ * safe.
+ */
+ u32 systim_overflow_latch_fix = 0x3FFFFFFF;
- /* latch SYSTIMH on read of SYSTIML */
- systim = (cycle_t)er32(SYSTIML);
+ do {
+ systim = (cycle_t)er32(SYSTIML);
+ } while (systim > systim_overflow_latch_fix);
systim |= (cycle_t)er32(SYSTIMH) << 32;
if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
@@ -7301,7 +7438,7 @@ static int __init e1000_init_module(void)
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
- pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
+ pr_info("Copyright(c) 1999 - 2015 Intel Corporation.\n");
ret = pci_register_driver(&e1000_driver);
return ret;
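The SYSTIML loop added above sidesteps the unreliable SYSTIMH latch by refusing to sample while the low half is close to its 32-bit wrap. A runnable sketch of the technique over a simulated split counter:

#include <stdint.h>
#include <stdio.h>

/* Simulated split 64-bit counter standing in for SYSTIML/SYSTIMH. */
static uint64_t fake_systim = 0x00000001fffffff0ull;

static uint32_t read_lo(void) { return (uint32_t)(fake_systim += 8); }
static uint32_t read_hi(void) { return (uint32_t)(fake_systim >> 32); }

#define LOW_SAFE_LIMIT	0x3fffffffu	/* above this, a wrap may race us */

static uint64_t read_systim64(void)
{
	uint32_t lo;

	/* Spin until the low half is far from wrapping, so the high
	 * half cannot change between the two reads.
	 */
	do {
		lo = read_lo();
	} while (lo > LOW_SAFE_LIMIT);

	return ((uint64_t)read_hi() << 32) | lo;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)read_systim64());
	return 0;
}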
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index fa6b103..49f205c 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.h b/drivers/net/ethernet/intel/e1000e/nvm.h
index 342bf69..5d46967 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.h
+++ b/drivers/net/ethernet/intel/e1000e/nvm.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index aa1923f..6d8c39a 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index b2005e1..de13aea 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/phy.h b/drivers/net/ethernet/intel/e1000e/phy.h
index 537d278..55bfe47 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.h
+++ b/drivers/net/ethernet/intel/e1000e/phy.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c
index 8d7b21d..25a0ad5 100644
--- a/drivers/net/ethernet/intel/e1000e/ptp.c
+++ b/drivers/net/ethernet/intel/e1000e/ptp.c
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h
index 85eefc4..b24e5fe 100644
--- a/drivers/net/ethernet/intel/e1000e/regs.h
+++ b/drivers/net/ethernet/intel/e1000e/regs.h
@@ -1,5 +1,5 @@
/* Intel PRO/1000 Linux driver
- * Copyright(c) 1999 - 2014 Intel Corporation.
+ * Copyright(c) 1999 - 2015 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,6 +38,8 @@
#define E1000_FEXTNVM4 0x00024 /* Future Extended NVM 4 - RW */
#define E1000_FEXTNVM6 0x00010 /* Future Extended NVM 6 - RW */
#define E1000_FEXTNVM7 0x000E4 /* Future Extended NVM 7 - RW */
+#define E1000_FEXTNVM9 0x5BB4 /* Future Extended NVM 9 - RW */
+#define E1000_FEXTNVM11 0x5BBC /* Future Extended NVM 11 - RW */
#define E1000_PCIEANACFG 0x00F18 /* PCIE Analog Config */
#define E1000_FCT 0x00030 /* Flow Control Type - RW */
#define E1000_VET 0x00038 /* VLAN Ether Type - RW */
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index 33c35d3..aca9cef 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -182,6 +182,7 @@ struct i40e_lump_tracking {
enum i40e_fd_stat_idx {
I40E_FD_STAT_ATR,
I40E_FD_STAT_SB,
+ I40E_FD_STAT_ATR_TUNNEL,
I40E_FD_STAT_PF_COUNT
};
#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
@@ -189,6 +190,8 @@ enum i40e_fd_stat_idx {
(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
#define I40E_FD_SB_STAT_IDX(pf_id) \
(I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+#define I40E_FD_ATR_TUNNEL_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR_TUNNEL)
struct i40e_fdir_filter {
struct hlist_node fdir_node;
@@ -263,8 +266,6 @@ struct i40e_pf {
struct hlist_head fdir_filter_list;
u16 fdir_pf_active_filters;
- u16 fd_sb_cnt_idx;
- u16 fd_atr_cnt_idx;
unsigned long fd_flush_timestamp;
u32 fd_flush_cnt;
u32 fd_add_err;
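The index macros above carve the global GLQF_PCNT counter space into one contiguous block per PF, so adding I40E_FD_STAT_ATR_TUNNEL to the enum grew the stride from 2 to 3 without touching any call site. A runnable mirror of the arithmetic (local names, not the driver's):

#include <stdio.h>

enum fd_stat_idx {
	FD_STAT_ATR,
	FD_STAT_SB,
	FD_STAT_ATR_TUNNEL,
	FD_STAT_PF_COUNT		/* stride: counters per PF */
};

#define FD_STAT_PF_IDX(pf)	((pf) * FD_STAT_PF_COUNT)
#define FD_ATR_IDX(pf)		(FD_STAT_PF_IDX(pf) + FD_STAT_ATR)
#define FD_SB_IDX(pf)		(FD_STAT_PF_IDX(pf) + FD_STAT_SB)
#define FD_ATR_TUNNEL_IDX(pf)	(FD_STAT_PF_IDX(pf) + FD_STAT_ATR_TUNNEL)

int main(void)
{
	/* PF 2's block starts at 6: ATR=6, SB=7, ATR_TUNNEL=8 */
	printf("%d %d %d\n", FD_ATR_IDX(2), FD_SB_IDX(2),
	       FD_ATR_TUNNEL_IDX(2));
	return 0;
}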
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 4cbaaeb..9a68c65 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -147,6 +147,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
I40E_PF_STAT("fdir_flush_cnt", fd_flush_cnt),
I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+ I40E_PF_STAT("fdir_atr_tunnel_match", stats.fd_atr_tunnel_match),
I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
/* LPI stats */
@@ -1548,6 +1549,17 @@ static int i40e_loopback_test(struct net_device *netdev, u64 *data)
return *data;
}
+static inline bool i40e_active_vfs(struct i40e_pf *pf)
+{
+ struct i40e_vf *vfs = pf->vf;
+ int i;
+
+ for (i = 0; i < pf->num_alloc_vfs; i++)
+ if (vfs[i].vf_states & I40E_VF_STAT_ACTIVE)
+ return true;
+ return false;
+}
+
static void i40e_diag_test(struct net_device *netdev,
struct ethtool_test *eth_test, u64 *data)
{
@@ -1560,6 +1572,20 @@ static void i40e_diag_test(struct net_device *netdev,
netif_info(pf, drv, netdev, "offline testing starting\n");
set_bit(__I40E_TESTING, &pf->state);
+
+ if (i40e_active_vfs(pf)) {
+ dev_warn(&pf->pdev->dev,
+ "Please take active VFS offline and restart the adapter before running NIC diagnostics\n");
+ data[I40E_ETH_TEST_REG] = 1;
+ data[I40E_ETH_TEST_EEPROM] = 1;
+ data[I40E_ETH_TEST_INTR] = 1;
+ data[I40E_ETH_TEST_LOOPBACK] = 1;
+ data[I40E_ETH_TEST_LINK] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__I40E_TESTING, &pf->state);
+ goto skip_ol_tests;
+ }
+
/* If the device is online then take it offline */
if (if_running)
/* indicate we're in test mode */
@@ -1605,6 +1631,8 @@ static void i40e_diag_test(struct net_device *netdev,
data[I40E_ETH_TEST_LOOPBACK] = 0;
}
+skip_ol_tests:
+
netif_info(pf, drv, netdev, "testing finished\n");
}
@@ -2265,7 +2293,7 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->pctype = 0;
input->dest_vsi = vsi->id;
input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
- input->cnt_index = pf->fd_sb_cnt_idx;
+ input->cnt_index = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
input->flow_type = fsp->flow_type;
input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
index 1803afe..c8b621e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
@@ -118,7 +118,7 @@ static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
*
* The FC EOF is converted to the value understood by HW for descriptor
* programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
- * first.
+ * first; it already checks for all supported valid EOF values.
**/
static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
{
@@ -132,9 +132,12 @@ static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
case FC_EOF_A:
return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
default:
- /* FIXME: still returns 0 */
- pr_err("Unrecognized EOF %x\n", eof);
- return 0;
+ /* All supported valid EOF values should already have been checked
+ * by calling i40e_fcoe_eof_is_supported() first, so this default
+ * case should never be hit.
+ */
+ WARN_ON(1);
+ return -EINVAL;
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index a54c144..0a3e928 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 4
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -772,9 +772,8 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
dcb_cfg = &hw->local_dcbx_config;
- /* See if DCB enabled with PFC TC */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED) ||
- !(dcb_cfg->pfc.pfcenable)) {
+ /* Collect Link XOFF stats when PFC is disabled */
+ if (!dcb_cfg->pfc.pfcenable) {
i40e_update_link_xoff_rx(pf);
return;
}
@@ -1097,12 +1096,18 @@ static void i40e_update_pf_stats(struct i40e_pf *pf)
&osd->rx_jabber, &nsd->rx_jabber);
/* FDIR stats */
- i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+ i40e_stat_update32(hw,
+ I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
pf->stat_offsets_loaded,
&osd->fd_atr_match, &nsd->fd_atr_match);
- i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+ i40e_stat_update32(hw,
+ I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
pf->stat_offsets_loaded,
&osd->fd_sb_match, &nsd->fd_sb_match);
+ i40e_stat_update32(hw,
+ I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
+ pf->stat_offsets_loaded,
+ &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
val = rd32(hw, I40E_PRTPM_EEE_STAT);
nsd->tx_lpi_status =
@@ -4739,7 +4744,8 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
pf->fd_add_err = pf->fd_atr_cnt = 0;
if (pf->fd_tcp_rule > 0) {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
pf->fd_tcp_rule = 0;
}
i40e_fdir_filter_restore(vsi);
@@ -5428,7 +5434,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
- dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
}
}
/* Wait for some more space to be available to turn on ATR */
@@ -5436,7 +5443,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
}
}
}
@@ -5469,7 +5477,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (!(time_after(jiffies, min_flush_time)) &&
(fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
- dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
disable_atr = true;
}
@@ -5496,7 +5505,8 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
if (!disable_atr)
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
- dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
}
}
}
@@ -7676,12 +7686,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
- /* Setup a counter for fd_atr per PF */
- pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
- /* Setup a counter for fd_sb per PF */
- pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
} else {
dev_info(&pf->pdev->dev,
"Flow Director Sideband mode Disabled in MFP mode\n");
@@ -7771,7 +7777,8 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
pf->fdir_pf_active_filters = 0;
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
/* if ATR was auto disabled it can be re-enabled. */
if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
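Most hunks in this file wrap formerly unconditional dev_info() calls in a debug_mask test, so flow-director state churn no longer floods the log by default. A minimal sketch of the mask-gated logging pattern (the DEBUG_FD bit position here is assumed):

#include <stdio.h>

#define DEBUG_FD	(1u << 4)	/* assumed bit position */

static unsigned int debug_mask;		/* e.g. set via module parameter */

#define fd_dbg(fmt, ...)						\
	do {								\
		if (debug_mask & DEBUG_FD)				\
			printf(fmt, ##__VA_ARGS__);			\
	} while (0)

int main(void)
{
	fd_dbg("suppressed: FD debug bit is clear\n");
	debug_mask |= DEBUG_FD;
	fd_dbg("printed: ATR re-enabled\n");
	return 0;
}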
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 0b4a7be..cc82a7f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -165,9 +165,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
tx_desc->cmd_type_offset_bsz =
build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
- /* set the timestamp */
- tx_buf->time_stamp = jiffies;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch.
*/
@@ -283,7 +280,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
if (add) {
pf->fd_tcp_rule++;
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
- dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n");
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
}
} else {
@@ -291,7 +289,8 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
(pf->fd_tcp_rule - 1) : 0;
if (pf->fd_tcp_rule == 0) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
- dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n");
}
}
@@ -501,7 +500,8 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
!(pf->auto_disable_flags &
I40E_FLAG_FD_SB_ENABLED)) {
- dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
+ if (I40E_DEBUG_FD & pf->hw.debug_mask)
+ dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
pf->auto_disable_flags |=
I40E_FLAG_FD_SB_ENABLED;
}
@@ -807,10 +807,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->vsi->seid,
tx_ring->queue_index,
tx_ring->next_to_use, i);
- dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- tx_ring->tx_bi[i].time_stamp, jiffies);
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
@@ -1653,9 +1649,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
- /* TODO: shouldn't we increment a counter indicating the
- * drop?
- */
continue;
}
@@ -1923,11 +1916,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
* i40e_atr - Add a Flow Director ATR filter
* @tx_ring: ring to add programming descriptor to
* @skb: send buffer
- * @flags: send flags
+ * @tx_flags: send tx flags
* @protocol: wire protocol
**/
static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 flags, __be16 protocol)
+ u32 tx_flags, __be16 protocol)
{
struct i40e_filter_program_desc *fdir_desc;
struct i40e_pf *pf = tx_ring->vsi->back;
@@ -1952,25 +1945,38 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (!tx_ring->atr_sample_rate)
return;
- /* snag network header to get L4 type and address */
- hdr.network = skb_network_header(skb);
+ if (!(tx_flags & (I40E_TX_FLAGS_IPV4 | I40E_TX_FLAGS_IPV6)))
+ return;
- /* Currently only IPv4/IPv6 with TCP is supported */
- if (protocol == htons(ETH_P_IP)) {
- if (hdr.ipv4->protocol != IPPROTO_TCP)
- return;
+ if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL)) {
+ /* snag network header to get L4 type and address */
+ hdr.network = skb_network_header(skb);
- /* access ihl as a u8 to avoid unaligned access on ia64 */
- hlen = (hdr.network[0] & 0x0F) << 2;
- } else if (protocol == htons(ETH_P_IPV6)) {
- if (hdr.ipv6->nexthdr != IPPROTO_TCP)
+ /* Currently only IPv4/IPv6 with TCP is supported;
+ * access ihl as a u8 to avoid unaligned access on ia64.
+ */
+ if (tx_flags & I40E_TX_FLAGS_IPV4)
+ hlen = (hdr.network[0] & 0x0F) << 2;
+ else if (protocol == htons(ETH_P_IPV6))
+ hlen = sizeof(struct ipv6hdr);
+ else
return;
-
- hlen = sizeof(struct ipv6hdr);
} else {
- return;
+ hdr.network = skb_inner_network_header(skb);
+ hlen = skb_inner_network_header_len(skb);
}
+ /* Currently only IPv4/IPv6 with TCP is supported.
+ * Note: tx_flags gets modified to reflect inner protocols in the
+ * tx_enable_csum function if encapsulation is enabled.
+ */
+ if ((tx_flags & I40E_TX_FLAGS_IPV4) &&
+ (hdr.ipv4->protocol != IPPROTO_TCP))
+ return;
+ else if ((tx_flags & I40E_TX_FLAGS_IPV6) &&
+ (hdr.ipv6->nexthdr != IPPROTO_TCP))
+ return;
+
th = (struct tcphdr *)(hdr.network + hlen);
/* Due to lack of space, no more new filters can be programmed */
@@ -2020,9 +2026,16 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
- dtype_cmd |=
- ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ if (!(tx_flags & I40E_TX_FLAGS_VXLAN_TUNNEL))
+ dtype_cmd |=
+ ((u32)I40E_FD_ATR_STAT_IDX(pf->hw.pf_id) <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ else
+ dtype_cmd |=
+ ((u32)I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id) <<
+ I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->rsvd = cpu_to_le32(0);
@@ -2043,13 +2056,13 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
* otherwise returns 0 to indicate the flags have been set properly.
**/
#ifdef I40E_FCOE
-int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct i40e_ring *tx_ring,
- u32 *flags)
-#else
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring,
u32 *flags)
+#else
+static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
#endif
{
__be16 protocol = skb->protocol;
@@ -2117,16 +2130,14 @@ out:
* i40e_tso - set up the tso context descriptor
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
* @hdr_len: ptr to the size of the packet header
* @cd_tunneling: ptr to context descriptor bits
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol, u8 *hdr_len,
- u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+ u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+ u32 *cd_tunneling)
{
u32 cd_cmd, cd_tso_len, cd_mss;
struct ipv6hdr *ipv6h;
@@ -2218,12 +2229,12 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
/**
* i40e_tx_enable_csum - Enable Tx checksum offloads
* @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
* @td_offset: Tx descriptor header offsets to set
* @cd_tunneling: ptr to context desc bits
**/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
u32 *td_cmd, u32 *td_offset,
struct i40e_ring *tx_ring,
u32 *cd_tunneling)
@@ -2239,6 +2250,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+ *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
default:
return;
@@ -2248,18 +2260,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
this_ipv6_hdr = inner_ipv6_hdr(skb);
this_tcp_hdrlen = inner_tcp_hdrlen(skb);
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
ip_hdr(skb)->check = 0;
} else {
*cd_tunneling |=
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
- if (tx_flags & I40E_TX_FLAGS_TSO)
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
}
@@ -2271,8 +2282,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
if (this_ip_hdr->version == 6) {
- tx_flags &= ~I40E_TX_FLAGS_IPV4;
- tx_flags |= I40E_TX_FLAGS_IPV6;
+ *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
}
} else {
network_hdr_len = skb_network_header_len(skb);
@@ -2282,12 +2293,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
}
/* Enable IP checksum offloads */
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
l4_hdr = this_ip_hdr->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
this_ip_hdr->check = 0;
} else {
@@ -2296,7 +2307,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
/* Now set the td_offset for IP header length */
*td_offset = (network_hdr_len >> 2) <<
I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
l4_hdr = this_ipv6_hdr->nexthdr;
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
/* Now set the td_offset for IP header length */
@@ -2394,9 +2405,9 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* Returns 0 if stop is not needed
**/
#ifdef I40E_FCOE
-int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
@@ -2476,13 +2487,13 @@ linearize_chk_done:
* @td_offset: offset for checksum or crc
**/
#ifdef I40E_FCOE
-void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
-#else
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#else
+static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
#endif
{
unsigned int data_len = skb->data_len;
@@ -2588,9 +2599,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->queue_index),
first->bytecount);
- /* set the timestamp */
- first->time_stamp = jiffies;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -2643,11 +2651,11 @@ dma_error:
* one descriptor.
**/
#ifdef I40E_FCOE
-int i40e_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
-#else
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
+inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
+#else
+static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
#endif
{
unsigned int f;
@@ -2709,7 +2717,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ tso = i40e_tso(tx_ring, skb, &hdr_len,
&cd_type_cmd_tso_mss, &cd_tunneling);
if (tso < 0)
@@ -2735,7 +2743,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_flags |= I40E_TX_FLAGS_CSUM;
- i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index 4b0b810..0dc48dc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -139,6 +139,7 @@ enum i40e_dyn_idx_t {
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
#define I40E_TX_FLAGS_TSYN (u32)(1 << 8)
#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -146,7 +147,6 @@ enum i40e_dyn_idx_t {
struct i40e_tx_buffer {
struct i40e_tx_desc *next_to_watch;
- unsigned long time_stamp;
union {
struct sk_buff *skb;
void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 568e855..9a5a75b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -1133,6 +1133,7 @@ struct i40e_hw_port_stats {
/* flow director stats */
u64 fd_atr_match;
u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 78d1c4f..4653b6e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -980,6 +980,13 @@ static int i40e_pci_sriov_enable(struct pci_dev *pdev, int num_vfs)
int pre_existing_vfs = pci_num_vf(pdev);
int err = 0;
+ if (pf->state & __I40E_TESTING) {
+ dev_warn(&pdev->dev,
+ "Cannot enable SR-IOV virtual functions while the device is undergoing diagnostic testing\n");
+ err = -EPERM;
+ goto err_out;
+ }
+
dev_info(&pdev->dev, "Allocating %d VFs.\n", num_vfs);
if (pre_existing_vfs && pre_existing_vfs != num_vfs)
i40e_free_vfs(pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index 3ef2309..ec7e220 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -322,10 +322,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->vsi->seid,
tx_ring->queue_index,
tx_ring->next_to_use, i);
- dev_info(tx_ring->dev, "tx_bi[next_to_clean]\n"
- " time_stamp <%lx>\n"
- " jiffies <%lx>\n",
- tx_ring->tx_bi[i].time_stamp, jiffies);
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
@@ -1128,9 +1124,6 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
- /* TODO: shouldn't we increment a counter indicating the
- * drop?
- */
continue;
}
@@ -1350,7 +1343,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
}
/**
- * i40e_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
+ * i40evf_tx_prepare_vlan_flags - prepare generic TX VLAN tagging flags for HW
* @skb: send buffer
* @tx_ring: ring to send buffer on
* @flags: the tx flags to be set
@@ -1361,9 +1354,9 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
* Returns an error code to indicate the frame should be dropped upon error,
* and otherwise returns 0 to indicate the flags have been set properly.
**/
-static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
- struct i40e_ring *tx_ring,
- u32 *flags)
+static inline int i40evf_tx_prepare_vlan_flags(struct sk_buff *skb,
+ struct i40e_ring *tx_ring,
+ u32 *flags)
{
__be16 protocol = skb->protocol;
u32 tx_flags = 0;
@@ -1406,16 +1399,14 @@ out:
* i40e_tso - set up the tso context descriptor
* @tx_ring: ptr to the ring to send
* @skb: ptr to the skb we're sending
- * @tx_flags: the collected send information
- * @protocol: the send protocol
* @hdr_len: ptr to the size of the packet header
* @cd_tunneling: ptr to context descriptor bits
*
* Returns 0 if no TSO can happen, 1 if tso is going, or error
**/
static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
- u32 tx_flags, __be16 protocol, u8 *hdr_len,
- u64 *cd_type_cmd_tso_mss, u32 *cd_tunneling)
+ u8 *hdr_len, u64 *cd_type_cmd_tso_mss,
+ u32 *cd_tunneling)
{
u32 cd_cmd, cd_tso_len, cd_mss;
struct ipv6hdr *ipv6h;
@@ -1466,12 +1457,12 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
/**
* i40e_tx_enable_csum - Enable Tx checksum offloads
* @skb: send buffer
- * @tx_flags: Tx flags currently set
+ * @tx_flags: pointer to Tx flags currently set
* @td_cmd: Tx descriptor command bits to set
* @td_offset: Tx descriptor header offsets to set
* @cd_tunneling: ptr to context desc bits
**/
-static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
+static void i40e_tx_enable_csum(struct sk_buff *skb, u32 *tx_flags,
u32 *td_cmd, u32 *td_offset,
struct i40e_ring *tx_ring,
u32 *cd_tunneling)
@@ -1487,6 +1478,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
switch (ip_hdr(skb)->protocol) {
case IPPROTO_UDP:
l4_tunnel = I40E_TXD_CTX_UDP_TUNNELING;
+ *tx_flags |= I40E_TX_FLAGS_VXLAN_TUNNEL;
break;
default:
return;
@@ -1496,18 +1488,17 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
this_ipv6_hdr = inner_ipv6_hdr(skb);
this_tcp_hdrlen = inner_tcp_hdrlen(skb);
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
-
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV4;
ip_hdr(skb)->check = 0;
} else {
*cd_tunneling |=
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
*cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
- if (tx_flags & I40E_TX_FLAGS_TSO)
+ if (*tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
}
@@ -1519,8 +1510,8 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
if (this_ip_hdr->version == 6) {
- tx_flags &= ~I40E_TX_FLAGS_IPV4;
- tx_flags |= I40E_TX_FLAGS_IPV6;
+ *tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ *tx_flags |= I40E_TX_FLAGS_IPV6;
}
@@ -1532,12 +1523,12 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
}
/* Enable IP checksum offloads */
- if (tx_flags & I40E_TX_FLAGS_IPV4) {
+ if (*tx_flags & I40E_TX_FLAGS_IPV4) {
l4_hdr = this_ip_hdr->protocol;
/* the stack computes the IP header already, the only time we
* need the hardware to recompute it is in the case of TSO.
*/
- if (tx_flags & I40E_TX_FLAGS_TSO) {
+ if (*tx_flags & I40E_TX_FLAGS_TSO) {
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV4_CSUM;
this_ip_hdr->check = 0;
} else {
@@ -1546,7 +1537,7 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
/* Now set the td_offset for IP header length */
*td_offset = (network_hdr_len >> 2) <<
I40E_TX_DESC_LENGTH_IPLEN_SHIFT;
- } else if (tx_flags & I40E_TX_FLAGS_IPV6) {
+ } else if (*tx_flags & I40E_TX_FLAGS_IPV6) {
l4_hdr = this_ipv6_hdr->nexthdr;
*td_cmd |= I40E_TX_DESC_CMD_IIPT_IPV6;
/* Now set the td_offset for IP header length */
@@ -1675,7 +1666,44 @@ linearize_chk_done:
}
/**
- * i40e_tx_map - Build the Tx descriptor
+ * __i40evf_maybe_stop_tx - 2nd level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns -EBUSY if a stop is needed, else 0
+ **/
+static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ /* Memory barrier before checking head and tail */
+ smp_mb();
+
+ /* Check again in case another CPU has just made room available. */
+ if (likely(I40E_DESC_UNUSED(tx_ring) < size))
+ return -EBUSY;
+
+ /* A reprieve! - use start_queue because it doesn't call schedule */
+ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+ ++tx_ring->tx_stats.restart_queue;
+ return 0;
+}
+
+/**
+ * i40evf_maybe_stop_tx - 1st level check for tx stop conditions
+ * @tx_ring: the ring to be checked
+ * @size: the size buffer we want to assure is available
+ *
+ * Returns 0 if stop is not needed
+ **/
+static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+{
+ if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
+ return 0;
+ return __i40evf_maybe_stop_tx(tx_ring, size);
+}
+
+/**
+ * i40evf_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on
* @skb: send buffer
* @first: first buffer info buffer to use
@@ -1684,9 +1712,9 @@ linearize_chk_done:
* @td_cmd: the command field in the descriptor
* @td_offset: offset for checksum or crc
**/
-static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
- struct i40e_tx_buffer *first, u32 tx_flags,
- const u8 hdr_len, u32 td_cmd, u32 td_offset)
+static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+ struct i40e_tx_buffer *first, u32 tx_flags,
+ const u8 hdr_len, u32 td_cmd, u32 td_offset)
{
unsigned int data_len = skb->data_len;
unsigned int size = skb_headlen(skb);
@@ -1792,9 +1820,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->queue_index),
first->bytecount);
- /* set the timestamp */
- first->time_stamp = jiffies;
-
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
@@ -1811,8 +1836,12 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i;
+ i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- writel(i, tx_ring->tail);
+ if (!skb->xmit_more ||
+ netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+ tx_ring->queue_index)))
+ writel(i, tx_ring->tail);
return;
@@ -1834,44 +1863,7 @@ dma_error:
}
/**
- * __i40e_maybe_stop_tx - 2nd level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns -EBUSY if a stop is needed, else 0
- **/
-static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
- /* Memory barrier before checking head and tail */
- smp_mb();
-
- /* Check again in a case another CPU has just made room available. */
- if (likely(I40E_DESC_UNUSED(tx_ring) < size))
- return -EBUSY;
-
- /* A reprieve! - use start_queue because it doesn't call schedule */
- netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
- ++tx_ring->tx_stats.restart_queue;
- return 0;
-}
-
-/**
- * i40e_maybe_stop_tx - 1st level check for tx stop conditions
- * @tx_ring: the ring to be checked
- * @size: the size buffer we want to assure is available
- *
- * Returns 0 if stop is not needed
- **/
-static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
-{
- if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
- return 0;
- return __i40e_maybe_stop_tx(tx_ring, size);
-}
-
-/**
- * i40e_xmit_descriptor_count - calculate number of tx descriptors needed
+ * i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
* @skb: send buffer
* @tx_ring: ring to send buffer on
*
@@ -1879,8 +1871,8 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
* there are not enough descriptors available in this ring, since we need at least
* one descriptor.
**/
-static int i40e_xmit_descriptor_count(struct sk_buff *skb,
- struct i40e_ring *tx_ring)
+static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
+ struct i40e_ring *tx_ring)
{
unsigned int f;
int count = 0;
@@ -1895,7 +1887,7 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
count += TXD_USE_COUNT(skb_headlen(skb));
- if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
+ if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return 0;
}
@@ -1921,11 +1913,11 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
u32 td_cmd = 0;
u8 hdr_len = 0;
int tso;
- if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
+ if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
return NETDEV_TX_BUSY;
/* prepare the xmit flags */
- if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+ if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
goto out_drop;
/* obtain protocol of skb */
@@ -1940,7 +1932,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (protocol == htons(ETH_P_IPV6))
tx_flags |= I40E_TX_FLAGS_IPV6;
- tso = i40e_tso(tx_ring, skb, tx_flags, protocol, &hdr_len,
+ tso = i40e_tso(tx_ring, skb, &hdr_len,
&cd_type_cmd_tso_mss, &cd_tunneling);
if (tso < 0)
@@ -1961,17 +1953,15 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
if (skb->ip_summed == CHECKSUM_PARTIAL) {
tx_flags |= I40E_TX_FLAGS_CSUM;
- i40e_tx_enable_csum(skb, tx_flags, &td_cmd, &td_offset,
+ i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
}
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2);
- i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
- td_cmd, td_offset);
-
- i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+ i40evf_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+ td_cmd, td_offset);
return NETDEV_TX_OK;
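
The rewritten transmit path above defers the doorbell: i40evf_tx_map() now skips the MMIO tail write while skb->xmit_more promises further frames, flushing only when the burst ends or the queue has been stopped, which amortizes one posted PCIe write over a whole burst. A minimal userspace model of that decision, with the ring stubbed (demo_ring and its fields are illustrative stand-ins, not driver types):

#include <stdbool.h>
#include <stdint.h>

struct demo_ring {
	uint32_t next_to_use;		/* sw producer index */
	volatile uint32_t *tail;	/* would be an MMIO register in the driver */
};

/* Mirror of the condition guarding writel(i, tx_ring->tail) above */
static void demo_notify_hw(struct demo_ring *r, bool xmit_more, bool stopped)
{
	if (!xmit_more || stopped)
		*r->tail = r->next_to_use;
}
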
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 1e49bb1..e7a34f8 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -138,6 +138,7 @@ enum i40e_dyn_idx_t {
#define I40E_TX_FLAGS_FCCRC (u32)(1 << 6)
#define I40E_TX_FLAGS_FSO (u32)(1 << 7)
#define I40E_TX_FLAGS_FD_SB (u32)(1 << 9)
+#define I40E_TX_FLAGS_VXLAN_TUNNEL (u32)(1 << 10)
#define I40E_TX_FLAGS_VLAN_MASK 0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT 29
@@ -145,7 +146,6 @@ enum i40e_dyn_idx_t {
struct i40e_tx_buffer {
struct i40e_tx_desc *next_to_watch;
- unsigned long time_stamp;
union {
struct sk_buff *skb;
void *raw_buf;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index ec9d83a..c463ec4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1108,6 +1108,7 @@ struct i40e_hw_port_stats {
/* flow director stats */
u64 fd_atr_match;
u64 fd_sb_match;
+ u64 fd_atr_tunnel_match;
/* EEE LPI */
u32 tx_lpi_status;
u32 rx_lpi_status;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 9f6fb19..9a1d0f1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -2594,18 +2594,35 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
struct ixgbe_hw *hw = &adapter->hw;
struct ixgbe_fdir_filter *input;
union ixgbe_atr_input mask;
+ u8 queue;
int err;
if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
return -EOPNOTSUPP;
- /*
- * Don't allow programming if the action is a queue greater than
- * the number of online Rx queues.
+ /* ring_cookie is masked into a set of queues and ixgbe pools, or
+ * we use the drop index.
*/
- if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
- (fsp->ring_cookie >= adapter->num_rx_queues))
- return -EINVAL;
+ if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
+ queue = IXGBE_FDIR_DROP_QUEUE;
+ } else {
+ u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
+ u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);
+
+ if (!vf && (ring >= adapter->num_rx_queues))
+ return -EINVAL;
+ else if (vf &&
+ ((vf > adapter->num_vfs) ||
+ ring >= adapter->num_rx_queues_per_pool))
+ return -EINVAL;
+
+ /* Map the ring onto the absolute queue index */
+ if (!vf)
+ queue = adapter->rx_ring[ring]->reg_idx;
+ else
+ queue = ((vf - 1) *
+ adapter->num_rx_queues_per_pool) + ring;
+ }
/* Don't allow indexes to exist outside of available space */
if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
@@ -2683,10 +2700,7 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
/* program filters to filter memory */
err = ixgbe_fdir_write_perfect_filter_82599(hw,
- &input->filter, input->sw_idx,
- (input->action == IXGBE_FDIR_DROP_QUEUE) ?
- IXGBE_FDIR_DROP_QUEUE :
- adapter->rx_ring[input->action]->reg_idx);
+ &input->filter, input->sw_idx, queue);
if (err)
goto err_out_w_lock;
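
The new ring_cookie handling splits ethtool's 64-bit cookie into a VF index and a ring, then flattens VF-directed filters onto absolute queue indices, one pool of rx queues per VF. A compilable model of that mapping, with the pool size hard-coded for illustration (the driver takes it from adapter->num_rx_queues_per_pool):

#include <stdint.h>
#include <stdio.h>

#define DEMO_QUEUES_PER_POOL 4	/* stands in for num_rx_queues_per_pool */

/* vf == 0 means the PF; VFs are 1-based, so VF n owns pool (n - 1) */
static int demo_cookie_to_queue(uint32_t ring, uint8_t vf,
				unsigned int pf_rx_queues)
{
	if (!vf)
		return ring < pf_rx_queues ? (int)ring : -1;
	if (ring >= DEMO_QUEUES_PER_POOL)
		return -1;
	return (vf - 1) * DEMO_QUEUES_PER_POOL + ring;
}

int main(void)
{
	printf("%d\n", demo_cookie_to_queue(2, 3, 8)); /* VF 3, ring 2 -> 10 */
	return 0;
}
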
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 1c75829..d52639b 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -3125,9 +3125,8 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
mib_counters_clear(mp);
- init_timer(&mp->mib_counters_timer);
- mp->mib_counters_timer.data = (unsigned long)mp;
- mp->mib_counters_timer.function = mib_counters_timer_wrapper;
+ setup_timer(&mp->mib_counters_timer, mib_counters_timer_wrapper,
+ (unsigned long)mp);
mp->mib_counters_timer.expires = jiffies + 30 * HZ;
spin_lock_init(&mp->mib_counters_lock);
@@ -3136,9 +3135,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
netif_napi_add(dev, &mp->napi, mv643xx_eth_poll, NAPI_POLL_WEIGHT);
- init_timer(&mp->rx_oom);
- mp->rx_oom.data = (unsigned long)mp;
- mp->rx_oom.function = oom_timer_wrapper;
+ setup_timer(&mp->rx_oom, oom_timer_wrapper, (unsigned long)mp);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
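
setup_timer() is shorthand for the three-line open-coded initialization being removed; behavior is unchanged. A sketch against the timer API of this era (demo_priv and the callback are hypothetical stand-ins for mv643xx_eth_private and mib_counters_timer_wrapper):

#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_priv {
	struct timer_list t;
};

static void demo_timeout(unsigned long data)
{
	struct demo_priv *p = (struct demo_priv *)data;

	mod_timer(&p->t, jiffies + 30 * HZ);	/* rearm, as the MIB timer does */
}

static void demo_init(struct demo_priv *p)
{
	/* one call replaces init_timer() plus the .data/.function assignments */
	setup_timer(&p->t, demo_timeout, (unsigned long)p);
}
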
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 7761045..68ae765 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -714,8 +714,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
msecs_to_jiffies(timeout))) {
mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
op);
- err = -EIO;
- goto out_reset;
+ if (op == MLX4_CMD_NOP) {
+ err = -EBUSY;
+ goto out;
+ } else {
+ err = -EIO;
+ goto out_reset;
+ }
}
err = context->result;
@@ -2912,7 +2917,7 @@ int mlx4_set_vf_mac(struct mlx4_dev *dev, int port, int vf, u64 mac)
port = mlx4_slaves_closest_port(dev, slave, port);
s_info = &priv->mfunc.master.vf_admin[slave].vport[port];
s_info->mac = mac;
- mlx4_info(dev, "default mac on vf %d port %d to %llX will take afect only after vf restart\n",
+ mlx4_info(dev, "default mac on vf %d port %d to %llX will take effect only after vf restart\n",
vf, port, s_info->mac);
return 0;
}
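
mlx4 uses MLX4_CMD_NOP as an interrupt self-test, so a NOP that times out is an expected probe-time outcome and is now reported as -EBUSY instead of taking the -EIO path that resets command state. The policy reduces to a one-line error mapping; a sketch, where the opcode value is my assumption of MLX4_CMD_NOP and worth checking against the driver's command enum:

#include <errno.h>

#define DEMO_CMD_NOP 0x31	/* believed to match MLX4_CMD_NOP */

static int demo_timeout_errno(int op)
{
	return op == DEMO_CMD_NOP ? -EBUSY : -EIO;
}
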
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index e71f313..3348e64 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -292,7 +292,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
u64 mtt_addr;
int err;
- if (vector > dev->caps.num_comp_vectors + dev->caps.comp_pool)
+ if (vector >= dev->caps.num_comp_vectors)
return -EINVAL;
cq->vector = vector;
@@ -319,7 +319,7 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
cq_context->flags |= cpu_to_be32(1 << 19);
cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
- cq_context->comp_eqn = priv->eq_table.eq[vector].eqn;
+ cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
mtt_addr = mlx4_mtt_addr(dev, mtt);
@@ -339,11 +339,11 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
init_completion(&cq->free);
cq->comp = mlx4_add_cq_to_tasklet;
cq->tasklet_ctx.priv =
- &priv->eq_table.eq[cq->vector].tasklet_ctx;
+ &priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].tasklet_ctx;
INIT_LIST_HEAD(&cq->tasklet_ctx.list);
- cq->irq = priv->eq_table.eq[cq->vector].irq;
+ cq->irq = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].irq;
return 0;
err_radix:
@@ -368,7 +368,10 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
- synchronize_irq(priv->eq_table.eq[cq->vector].irq);
+ synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
+ if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
+ synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
spin_lock_irq(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 22da4d0..63769df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -66,6 +66,7 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv,
cq->ring = ring;
cq->is_tx = mode;
+ cq->vector = mdev->dev->caps.num_comp_vectors;
/* Allocate HW buffers on provided NUMA node.
* dev->numa_node is used in mtt range allocation flow.
@@ -101,12 +102,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
int err = 0;
char name[25];
int timestamp_en = 0;
- struct cpu_rmap *rmap =
-#ifdef CONFIG_RFS_ACCEL
- priv->dev->rx_cpu_rmap;
-#else
- NULL;
-#endif
+ bool assigned_eq = false;
cq->dev = mdev->pndev[priv->port];
cq->mcq.set_ci_db = cq->wqres.db.db;
@@ -116,23 +112,19 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
memset(cq->buf, 0, cq->buf_size);
if (cq->is_tx == RX) {
- if (mdev->dev->caps.comp_pool) {
- if (!cq->vector) {
- sprintf(name, "%s-%d", priv->dev->name,
- cq->ring);
- /* Set IRQ for specific name (per ring) */
- if (mlx4_assign_eq(mdev->dev, name, rmap,
- &cq->vector)) {
- cq->vector = (cq->ring + 1 + priv->port)
- % mdev->dev->caps.num_comp_vectors;
- mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
- name);
- }
-
+ if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
+ cq->vector)) {
+ cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);
+
+ err = mlx4_assign_eq(mdev->dev, priv->port,
+ &cq->vector);
+ if (err) {
+ mlx4_err(mdev, "Failed assigning an EQ to %s\n",
+ name);
+ goto free_eq;
}
- } else {
- cq->vector = (cq->ring + 1 + priv->port) %
- mdev->dev->caps.num_comp_vectors;
+
+ assigned_eq = true;
}
cq->irq_desc =
@@ -159,7 +151,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
&mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
cq->vector, 0, timestamp_en);
if (err)
- return err;
+ goto free_eq;
cq->mcq.comp = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
cq->mcq.event = mlx4_en_cq_event;
@@ -168,13 +160,6 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
NAPI_POLL_WEIGHT);
} else {
- struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
-
- err = irq_set_affinity_hint(cq->mcq.irq,
- ring->affinity_mask);
- if (err)
- mlx4_warn(mdev, "Failed setting affinity hint\n");
-
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_hash_add(&cq->napi);
}
@@ -182,6 +167,12 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
napi_enable(&cq->napi);
return 0;
+
+free_eq:
+ if (assigned_eq)
+ mlx4_release_eq(mdev->dev, cq->vector);
+ cq->vector = mdev->dev->caps.num_comp_vectors;
+ return err;
}
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
@@ -191,9 +182,9 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
- if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+ if (mlx4_is_eq_vector_valid(mdev->dev, priv->port, cq->vector) &&
+ cq->is_tx == RX)
mlx4_release_eq(priv->mdev->dev, cq->vector);
- }
cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
@@ -207,7 +198,6 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
if (!cq->is_tx) {
napi_hash_del(&cq->napi);
synchronize_rcu();
- irq_set_affinity_hint(cq->mcq.irq, NULL);
}
netif_napi_del(&cq->napi);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 32f5ec7..98efb58 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1501,17 +1501,13 @@ static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
{
struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
int numa_node = priv->mdev->dev->numa_node;
- int ret = 0;
if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
return -ENOMEM;
- ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
- ring->affinity_mask);
- if (ret)
- free_cpumask_var(ring->affinity_mask);
-
- return ret;
+ cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
+ ring->affinity_mask);
+ return 0;
}
static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
@@ -1958,7 +1954,6 @@ void mlx4_en_free_resources(struct mlx4_en_priv *priv)
int i;
#ifdef CONFIG_RFS_ACCEL
- free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
priv->dev->rx_cpu_rmap = NULL;
#endif
@@ -2016,11 +2011,7 @@ int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
}
#ifdef CONFIG_RFS_ACCEL
- if (priv->mdev->dev->caps.comp_pool) {
- priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->mdev->dev->caps.comp_pool);
- if (!priv->dev->rx_cpu_rmap)
- goto err;
- }
+ priv->dev->rx_cpu_rmap = mlx4_get_cpu_rmap(priv->mdev->dev, priv->port);
#endif
return 0;
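
Ownership of the RFS cpu_rmap moves from mlx4_en into the core driver: the core allocates one rmap per port while creating EQs, and the netdev now borrows a pointer via mlx4_get_cpu_rmap() instead of calling alloc_irq_cpu_rmap()/free_irq_cpu_rmap() itself. A sketch of the consumer side under that model (the demo_* function names are illustrative; the mlx4 accessor is the one added in this patch):

#include <linux/netdevice.h>
#include <linux/mlx4/device.h>

#ifdef CONFIG_RFS_ACCEL
static void demo_attach_rmap(struct net_device *dev, struct mlx4_dev *mdev,
			     int port)
{
	dev->rx_cpu_rmap = mlx4_get_cpu_rmap(mdev, port);	/* borrowed */
}

static void demo_detach_rmap(struct net_device *dev)
{
	/* no free here: the core releases the rmap in its port cleanup */
	dev->rx_cpu_rmap = NULL;
}
#endif
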
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 2a77a6b..35f726c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -337,15 +337,10 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
struct mlx4_dev *dev = mdev->dev;
mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- if (!dev->caps.comp_pool)
- num_of_eqs = max_t(int, MIN_RX_RINGS,
- min_t(int,
- dev->caps.num_comp_vectors,
- DEF_RX_RINGS));
- else
- num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
- dev->caps.comp_pool/
- dev->caps.num_ports) - 1;
+ num_of_eqs = max_t(int, MIN_RX_RINGS,
+ min_t(int,
+ mlx4_get_eqs_per_port(mdev->dev, i),
+ DEF_RX_RINGS));
num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
min_t(int, num_of_eqs,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index f7bf312..7bed3a8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -144,9 +144,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->queue_index = queue_index;
if (queue_index < priv->num_tx_rings_p_up)
- cpumask_set_cpu_local_first(queue_index,
- priv->mdev->dev->numa_node,
- &ring->affinity_mask);
+ cpumask_set_cpu(cpumask_local_spread(queue_index,
+ priv->mdev->dev->numa_node),
+ &ring->affinity_mask);
*pring = ring;
return 0;
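
cpumask_local_spread(i, node) maps index i onto an online CPU, walking node-local CPUs first and then the rest, and it always succeeds; that is why the error handling around the old cpumask_set_cpu_local_first() call disappears in both hunks above. A sketch of the resulting idiom:

#include <linux/cpumask.h>

static void demo_set_ring_affinity(struct cpumask *mask, int ring_idx,
				   int numa_node)
{
	/* cannot fail: always resolves to some online CPU */
	cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node), mask);
}
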
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 80bcd64..aae13ad 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -221,6 +221,22 @@ static void mlx4_slave_event(struct mlx4_dev *dev, int slave,
slave_event(dev, slave, eqe);
}
+#if defined(CONFIG_SMP)
+static void mlx4_set_eq_affinity_hint(struct mlx4_priv *priv, int vec)
+{
+ int hint_err;
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_eq *eq = &priv->eq_table.eq[vec];
+
+ if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+ return;
+
+ hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
+ if (hint_err)
+ mlx4_warn(dev, "irq_set_affinity_hint failed, err %d\n", hint_err);
+}
+#endif
+
int mlx4_gen_pkey_eqe(struct mlx4_dev *dev, int slave, u8 port)
{
struct mlx4_eqe eqe;
@@ -895,8 +911,8 @@ static int mlx4_num_eq_uar(struct mlx4_dev *dev)
* we need to map, take the difference of highest index and
* the lowest index we'll use and add 1.
*/
- return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs +
- dev->caps.comp_pool)/4 - dev->caps.reserved_eqs/4 + 1;
+ return (dev->caps.num_comp_vectors + 1 + dev->caps.reserved_eqs) / 4 -
+ dev->caps.reserved_eqs / 4 + 1;
}
static void __iomem *mlx4_get_eq_uar(struct mlx4_dev *dev, struct mlx4_eq *eq)
@@ -1085,32 +1101,21 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
static void mlx4_free_irqs(struct mlx4_dev *dev)
{
struct mlx4_eq_table *eq_table = &mlx4_priv(dev)->eq_table;
- struct mlx4_priv *priv = mlx4_priv(dev);
- int i, vec;
+ int i;
if (eq_table->have_irq)
free_irq(dev->persist->pdev->irq, dev);
for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
if (eq_table->eq[i].have_irq) {
+ free_cpumask_var(eq_table->eq[i].affinity_mask);
+#if defined(CONFIG_SMP)
+ irq_set_affinity_hint(eq_table->eq[i].irq, NULL);
+#endif
free_irq(eq_table->eq[i].irq, eq_table->eq + i);
eq_table->eq[i].have_irq = 0;
}
- for (i = 0; i < dev->caps.comp_pool; i++) {
- /*
- * Freeing the assigned irq's
- * all bits should be 0, but we need to validate
- */
- if (priv->msix_ctl.pool_bm & 1ULL << i) {
- /* NO need protecting*/
- vec = dev->caps.num_comp_vectors + 1 + i;
- free_irq(priv->eq_table.eq[vec].irq,
- &priv->eq_table.eq[vec]);
- }
- }
-
-
kfree(eq_table->irq_names);
}
@@ -1191,76 +1196,73 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
}
priv->eq_table.irq_names =
- kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1 +
- dev->caps.comp_pool),
+ kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
GFP_KERNEL);
if (!priv->eq_table.irq_names) {
err = -ENOMEM;
- goto err_out_bitmap;
+ goto err_out_clr_int;
}
- for (i = 0; i < dev->caps.num_comp_vectors; ++i) {
- err = mlx4_create_eq(dev, dev->caps.num_cqs -
- dev->caps.reserved_cqs +
- MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
- &priv->eq_table.eq[i]);
- if (err) {
- --i;
- goto err_out_unmap;
- }
- }
-
- err = mlx4_create_eq(dev, MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? dev->caps.num_comp_vectors : 0,
- &priv->eq_table.eq[dev->caps.num_comp_vectors]);
- if (err)
- goto err_out_comp;
-
- /*if additional completion vectors poolsize is 0 this loop will not run*/
- for (i = dev->caps.num_comp_vectors + 1;
- i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i) {
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
+ if (i == MLX4_EQ_ASYNC) {
+ err = mlx4_create_eq(dev,
+ MLX4_NUM_ASYNC_EQE + MLX4_NUM_SPARE_EQE,
+ 0, &priv->eq_table.eq[MLX4_EQ_ASYNC]);
+ } else {
+ struct mlx4_eq *eq = &priv->eq_table.eq[i];
+#ifdef CONFIG_RFS_ACCEL
+ int port = find_first_bit(eq->actv_ports.ports,
+ dev->caps.num_ports) + 1;
+
+ if (port <= dev->caps.num_ports) {
+ struct mlx4_port_info *info =
+ &mlx4_priv(dev)->port[port];
+
+ if (!info->rmap) {
+ info->rmap = alloc_irq_cpu_rmap(
+ mlx4_get_eqs_per_port(dev, port));
+ if (!info->rmap) {
+ mlx4_warn(dev, "Failed to allocate cpu rmap\n");
+ err = -ENOMEM;
+ goto err_out_unmap;
+ }
+ }
- err = mlx4_create_eq(dev, dev->caps.num_cqs -
- dev->caps.reserved_cqs +
- MLX4_NUM_SPARE_EQE,
- (dev->flags & MLX4_FLAG_MSI_X) ? i : 0,
- &priv->eq_table.eq[i]);
- if (err) {
- --i;
- goto err_out_unmap;
+ err = irq_cpu_rmap_add(
+ info->rmap, eq->irq);
+ if (err)
+ mlx4_warn(dev, "Failed adding irq rmap\n");
+ }
+#endif
+ err = mlx4_create_eq(dev, dev->caps.num_cqs -
+ dev->caps.reserved_cqs +
+ MLX4_NUM_SPARE_EQE,
+ (dev->flags & MLX4_FLAG_MSI_X) ?
+ i + 1 - !!(i > MLX4_EQ_ASYNC) : 0,
+ eq);
}
+ if (err)
+ goto err_out_unmap;
}
-
if (dev->flags & MLX4_FLAG_MSI_X) {
const char *eq_name;
- for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
- if (i < dev->caps.num_comp_vectors) {
- snprintf(priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE,
- "mlx4-comp-%d@pci:%s", i,
- pci_name(dev->persist->pdev));
- } else {
- snprintf(priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE,
- "mlx4-async@pci:%s",
- pci_name(dev->persist->pdev));
- }
+ snprintf(priv->eq_table.irq_names +
+ MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE,
+ "mlx4-async@pci:%s",
+ pci_name(dev->persist->pdev));
+ eq_name = priv->eq_table.irq_names +
+ MLX4_EQ_ASYNC * MLX4_IRQNAME_SIZE;
- eq_name = priv->eq_table.irq_names +
- i * MLX4_IRQNAME_SIZE;
- err = request_irq(priv->eq_table.eq[i].irq,
- mlx4_msi_x_interrupt, 0, eq_name,
- priv->eq_table.eq + i);
- if (err)
- goto err_out_async;
+ err = request_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq,
+ mlx4_msi_x_interrupt, 0, eq_name,
+ priv->eq_table.eq + MLX4_EQ_ASYNC);
+ if (err)
+ goto err_out_unmap;
- priv->eq_table.eq[i].have_irq = 1;
- }
+ priv->eq_table.eq[MLX4_EQ_ASYNC].have_irq = 1;
} else {
snprintf(priv->eq_table.irq_names,
MLX4_IRQNAME_SIZE,
@@ -1269,36 +1271,38 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
err = request_irq(dev->persist->pdev->irq, mlx4_interrupt,
IRQF_SHARED, priv->eq_table.irq_names, dev);
if (err)
- goto err_out_async;
+ goto err_out_unmap;
priv->eq_table.have_irq = 1;
}
err = mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
if (err)
mlx4_warn(dev, "MAP_EQ for async EQ %d failed (%d)\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn, err);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn, err);
- for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
- eq_set_ci(&priv->eq_table.eq[i], 1);
+ /* arm ASYNC eq */
+ eq_set_ci(&priv->eq_table.eq[MLX4_EQ_ASYNC], 1);
return 0;
-err_out_async:
- mlx4_free_eq(dev, &priv->eq_table.eq[dev->caps.num_comp_vectors]);
-
-err_out_comp:
- i = dev->caps.num_comp_vectors - 1;
-
err_out_unmap:
- while (i >= 0) {
- mlx4_free_eq(dev, &priv->eq_table.eq[i]);
- --i;
+ while (i >= 0)
+ mlx4_free_eq(dev, &priv->eq_table.eq[i--]);
+#ifdef CONFIG_RFS_ACCEL
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ if (mlx4_priv(dev)->port[i].rmap) {
+ free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+ mlx4_priv(dev)->port[i].rmap = NULL;
+ }
}
+#endif
+ mlx4_free_irqs(dev);
+
+err_out_clr_int:
if (!mlx4_is_slave(dev))
mlx4_unmap_clr_int(dev);
- mlx4_free_irqs(dev);
err_out_bitmap:
mlx4_unmap_uar(dev);
@@ -1316,11 +1320,19 @@ void mlx4_cleanup_eq_table(struct mlx4_dev *dev)
int i;
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 1,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
+#ifdef CONFIG_RFS_ACCEL
+ for (i = 1; i <= dev->caps.num_ports; i++) {
+ if (mlx4_priv(dev)->port[i].rmap) {
+ free_irq_cpu_rmap(mlx4_priv(dev)->port[i].rmap);
+ mlx4_priv(dev)->port[i].rmap = NULL;
+ }
+ }
+#endif
mlx4_free_irqs(dev);
- for (i = 0; i < dev->caps.num_comp_vectors + dev->caps.comp_pool + 1; ++i)
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i)
mlx4_free_eq(dev, &priv->eq_table.eq[i]);
if (!mlx4_is_slave(dev))
@@ -1371,87 +1383,169 @@ int mlx4_test_interrupts(struct mlx4_dev *dev)
/* Return to default */
mlx4_MAP_EQ(dev, get_async_ev_mask(dev), 0,
- priv->eq_table.eq[dev->caps.num_comp_vectors].eqn);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].eqn);
return err;
}
EXPORT_SYMBOL(mlx4_test_interrupts);
-int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
- int *vector)
+bool mlx4_is_eq_vector_valid(struct mlx4_dev *dev, u8 port, int vector)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+ if (vector < 0 || (vector >= dev->caps.num_comp_vectors + 1) ||
+ (vector == MLX4_EQ_ASYNC))
+ return false;
+
+ return test_bit(port - 1, priv->eq_table.eq[vector].actv_ports.ports);
+}
+EXPORT_SYMBOL(mlx4_is_eq_vector_valid);
+
+u32 mlx4_get_eqs_per_port(struct mlx4_dev *dev, u8 port)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ unsigned int i;
+ unsigned int sum = 0;
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; i++)
+ sum += !!test_bit(port - 1,
+ priv->eq_table.eq[i].actv_ports.ports);
+
+ return sum;
+}
+EXPORT_SYMBOL(mlx4_get_eqs_per_port);
+
+int mlx4_is_eq_shared(struct mlx4_dev *dev, int vector)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+
+ vector = MLX4_CQ_TO_EQ_VECTOR(vector);
+ if (vector <= 0 || (vector >= dev->caps.num_comp_vectors + 1))
+ return -EINVAL;
+
+ return !!(bitmap_weight(priv->eq_table.eq[vector].actv_ports.ports,
+ dev->caps.num_ports) > 1);
+}
+EXPORT_SYMBOL(mlx4_is_eq_shared);
+
+struct cpu_rmap *mlx4_get_cpu_rmap(struct mlx4_dev *dev, int port)
{
+ return mlx4_priv(dev)->port[port].rmap;
+}
+EXPORT_SYMBOL(mlx4_get_cpu_rmap);
+int mlx4_assign_eq(struct mlx4_dev *dev, u8 port, int *vector)
+{
struct mlx4_priv *priv = mlx4_priv(dev);
- int vec = 0, err = 0, i;
+ int err = 0, i = 0;
+ u32 min_ref_count_val = (u32)-1;
+ int requested_vector = MLX4_CQ_TO_EQ_VECTOR(*vector);
+ int *prequested_vector = NULL;
+
mutex_lock(&priv->msix_ctl.pool_lock);
- for (i = 0; !vec && i < dev->caps.comp_pool; i++) {
- if (~priv->msix_ctl.pool_bm & 1ULL << i) {
- priv->msix_ctl.pool_bm |= 1ULL << i;
- vec = dev->caps.num_comp_vectors + 1 + i;
- snprintf(priv->eq_table.irq_names +
- vec * MLX4_IRQNAME_SIZE,
- MLX4_IRQNAME_SIZE, "%s", name);
-#ifdef CONFIG_RFS_ACCEL
- if (rmap) {
- err = irq_cpu_rmap_add(rmap,
- priv->eq_table.eq[vec].irq);
- if (err)
- mlx4_warn(dev, "Failed adding irq rmap\n");
+ if (requested_vector < (dev->caps.num_comp_vectors + 1) &&
+ (requested_vector >= 0) &&
+ (requested_vector != MLX4_EQ_ASYNC)) {
+ if (test_bit(port - 1,
+ priv->eq_table.eq[requested_vector].actv_ports.ports)) {
+ prequested_vector = &requested_vector;
+ } else {
+ struct mlx4_eq *eq;
+
+ for (i = 1; i < port;
+ requested_vector += mlx4_get_eqs_per_port(dev, i++))
+ ;
+
+ eq = &priv->eq_table.eq[requested_vector];
+ if (requested_vector < dev->caps.num_comp_vectors + 1 &&
+ test_bit(port - 1, eq->actv_ports.ports)) {
+ prequested_vector = &requested_vector;
}
-#endif
- err = request_irq(priv->eq_table.eq[vec].irq,
- mlx4_msi_x_interrupt, 0,
- &priv->eq_table.irq_names[vec<<5],
- priv->eq_table.eq + vec);
- if (err) {
- /*zero out bit by fliping it*/
- priv->msix_ctl.pool_bm ^= 1 << i;
- vec = 0;
- continue;
- /*we dont want to break here*/
+ }
+ }
+
+ if (!prequested_vector) {
+ requested_vector = -1;
+ for (i = 0; min_ref_count_val && i < dev->caps.num_comp_vectors + 1;
+ i++) {
+ struct mlx4_eq *eq = &priv->eq_table.eq[i];
+
+ if (min_ref_count_val > eq->ref_count &&
+ test_bit(port - 1, eq->actv_ports.ports)) {
+ min_ref_count_val = eq->ref_count;
+ requested_vector = i;
}
+ }
+
+ if (requested_vector < 0) {
+ err = -ENOSPC;
+ goto err_unlock;
+ }
+
+ prequested_vector = &requested_vector;
+ }
+
+ if (!test_bit(*prequested_vector, priv->msix_ctl.pool_bm) &&
+ dev->flags & MLX4_FLAG_MSI_X) {
+ set_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+ snprintf(priv->eq_table.irq_names +
+ *prequested_vector * MLX4_IRQNAME_SIZE,
+ MLX4_IRQNAME_SIZE, "mlx4-%d@%s",
+ *prequested_vector, dev_name(&dev->persist->pdev->dev));
+
+ err = request_irq(priv->eq_table.eq[*prequested_vector].irq,
+ mlx4_msi_x_interrupt, 0,
+ &priv->eq_table.irq_names[*prequested_vector << 5],
+ priv->eq_table.eq + *prequested_vector);
- eq_set_ci(&priv->eq_table.eq[vec], 1);
+ if (err) {
+ clear_bit(*prequested_vector, priv->msix_ctl.pool_bm);
+ *prequested_vector = -1;
+ } else {
+#if defined(CONFIG_SMP)
+ mlx4_set_eq_affinity_hint(priv, *prequested_vector);
+#endif
+ eq_set_ci(&priv->eq_table.eq[*prequested_vector], 1);
+ priv->eq_table.eq[*prequested_vector].have_irq = 1;
}
}
+
+ if (!err && *prequested_vector >= 0)
+ priv->eq_table.eq[*prequested_vector].ref_count++;
+
+err_unlock:
mutex_unlock(&priv->msix_ctl.pool_lock);
- if (vec) {
- *vector = vec;
- } else {
+ if (!err && *prequested_vector >= 0)
+ *vector = MLX4_EQ_TO_CQ_VECTOR(*prequested_vector);
+ else
*vector = 0;
- err = (i == dev->caps.comp_pool) ? -ENOSPC : err;
- }
+
return err;
}
EXPORT_SYMBOL(mlx4_assign_eq);
-int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int cq_vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- return priv->eq_table.eq[vec].irq;
+ return priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq_vec)].irq;
}
EXPORT_SYMBOL(mlx4_eq_get_irq);
void mlx4_release_eq(struct mlx4_dev *dev, int vec)
{
struct mlx4_priv *priv = mlx4_priv(dev);
- /*bm index*/
- int i = vec - dev->caps.num_comp_vectors - 1;
-
- if (likely(i >= 0)) {
- /*sanity check , making sure were not trying to free irq's
- Belonging to a legacy EQ*/
- mutex_lock(&priv->msix_ctl.pool_lock);
- if (priv->msix_ctl.pool_bm & 1ULL << i) {
- free_irq(priv->eq_table.eq[vec].irq,
- &priv->eq_table.eq[vec]);
- priv->msix_ctl.pool_bm &= ~(1ULL << i);
- }
- mutex_unlock(&priv->msix_ctl.pool_lock);
- }
+ int eq_vec = MLX4_CQ_TO_EQ_VECTOR(vec);
+ mutex_lock(&priv->msix_ctl.pool_lock);
+ priv->eq_table.eq[eq_vec].ref_count--;
+
+ /* once we have allocated an EQ, we don't release it: it might be bound
+ * to a cpu_rmap.
+ */
+ mutex_unlock(&priv->msix_ctl.pool_lock);
}
EXPORT_SYMBOL(mlx4_release_eq);
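
With the pool bitmap gone, mlx4_assign_eq() balances by reference count: among the EQs whose actv_ports mask includes the requesting port, it picks the least-referenced one, and mlx4_release_eq() only decrements because the IRQ stays requested (it may be wired into a cpu_rmap). A compilable model of the selection, with each EQ stubbed as a ref count plus a port bitmask; in the driver this runs under msix_ctl.pool_lock:

#include <stdint.h>

struct demo_eq {
	uint32_t ref_count;
	uint32_t port_mask;	/* stands in for actv_ports.ports */
};

static int demo_pick_eq(struct demo_eq *eq, int num_eq, int port)
{
	uint32_t best = UINT32_MAX;
	int pick = -1, i;

	for (i = 0; i < num_eq; i++) {
		if (!(eq[i].port_mask & (1u << (port - 1))))
			continue;	/* EQ does not serve this port */
		if (eq[i].ref_count < best) {
			best = eq[i].ref_count;
			pick = i;
		}
	}
	if (pick >= 0)
		eq[pick].ref_count++;	/* mirrors the assign side */
	return pick;	/* -1 corresponds to the -ENOSPC case */
}
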
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 70d33f6..7d57777 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2364,11 +2364,11 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (err) {
if (dev->flags & MLX4_FLAG_MSI_X) {
mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
mlx4_warn(dev, "Trying again without MSI-X\n");
} else {
mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
- priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
}
@@ -2481,14 +2481,45 @@ err_uar_table_free:
return err;
}
+static int mlx4_init_affinity_hint(struct mlx4_dev *dev, int port, int eqn)
+{
+ int requested_cpu = 0;
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_eq *eq;
+ int off = 0;
+ int i;
+
+ if (eqn > dev->caps.num_comp_vectors)
+ return -EINVAL;
+
+ for (i = 1; i < port; i++)
+ off += mlx4_get_eqs_per_port(dev, i);
+
+ requested_cpu = eqn - off - !!(eqn > MLX4_EQ_ASYNC);
+
+ /* Meaning EQs are shared, and this call comes from the second port */
+ if (requested_cpu < 0)
+ return 0;
+
+ eq = &priv->eq_table.eq[eqn];
+
+ if (!zalloc_cpumask_var(&eq->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ cpumask_set_cpu(requested_cpu, eq->affinity_mask);
+
+ return 0;
+}
+
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
int i;
+ int port = 0;
if (msi_x) {
- int nreq = dev->caps.num_ports * num_online_cpus() + MSIX_LEGACY_SZ;
+ int nreq = dev->caps.num_ports * num_online_cpus() + 1;
nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
nreq);
@@ -2503,20 +2534,55 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
nreq = pci_enable_msix_range(dev->persist->pdev, entries, 2,
nreq);
- if (nreq < 0) {
+ if (nreq < 0 || nreq < MLX4_EQ_ASYNC) {
kfree(entries);
goto no_msi;
- } else if (nreq < MSIX_LEGACY_SZ +
- dev->caps.num_ports * MIN_MSIX_P_PORT) {
- /*Working in legacy mode , all EQ's shared*/
- dev->caps.comp_pool = 0;
- dev->caps.num_comp_vectors = nreq - 1;
- } else {
- dev->caps.comp_pool = nreq - MSIX_LEGACY_SZ;
- dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
}
- for (i = 0; i < nreq; ++i)
- priv->eq_table.eq[i].irq = entries[i].vector;
+ /* 1 is reserved for events (asynchronous EQ) */
+ dev->caps.num_comp_vectors = nreq - 1;
+
+ priv->eq_table.eq[MLX4_EQ_ASYNC].irq = entries[0].vector;
+ bitmap_zero(priv->eq_table.eq[MLX4_EQ_ASYNC].actv_ports.ports,
+ dev->caps.num_ports);
+
+ for (i = 0; i < dev->caps.num_comp_vectors + 1; i++) {
+ if (i == MLX4_EQ_ASYNC)
+ continue;
+
+ priv->eq_table.eq[i].irq =
+ entries[i + 1 - !!(i > MLX4_EQ_ASYNC)].vector;
+
+ if (MLX4_IS_LEGACY_EQ_MODE(dev->caps)) {
+ bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+ dev->caps.num_ports);
+ /* We don't set affinity hint when there
+ * aren't enough EQs
+ */
+ } else {
+ set_bit(port,
+ priv->eq_table.eq[i].actv_ports.ports);
+ if (mlx4_init_affinity_hint(dev, port + 1, i))
+ mlx4_warn(dev, "Couldn't init hint cpumask for EQ %d\n",
+ i);
+ }
+ /* We divide the EQs evenly between the two ports.
+ * (dev->caps.num_comp_vectors / dev->caps.num_ports)
+ * refers to the number of EQs per port
+ * (i.e. eqs_per_port). Theoretically, we would like to
+ * write something like (i + 1) % eqs_per_port == 0.
+ * However, since there's an asynchronous Eq, we have
+ * to skip over it by comparing this condition to
+ * !!((i + 1) > MLX4_EQ_ASYNC).
+ */
+ if ((dev->caps.num_comp_vectors > dev->caps.num_ports) &&
+ ((i + 1) %
+ (dev->caps.num_comp_vectors / dev->caps.num_ports)) ==
+ !!((i + 1) > MLX4_EQ_ASYNC))
+ /* If dev->caps.num_comp_vectors < dev->caps.num_ports,
+ * everything is shared anyway.
+ */
+ port++;
+ }
dev->flags |= MLX4_FLAG_MSI_X;
@@ -2526,10 +2592,15 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
no_msi:
dev->caps.num_comp_vectors = 1;
- dev->caps.comp_pool = 0;
- for (i = 0; i < 2; ++i)
+ BUG_ON(MLX4_EQ_ASYNC >= 2);
+ for (i = 0; i < 2; ++i) {
priv->eq_table.eq[i].irq = dev->persist->pdev->irq;
+ if (i != MLX4_EQ_ASYNC) {
+ bitmap_fill(priv->eq_table.eq[i].actv_ports.ports,
+ dev->caps.num_ports);
+ }
+ }
}
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
@@ -2594,6 +2665,10 @@ static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
device_remove_file(&info->dev->persist->pdev->dev, &info->port_attr);
device_remove_file(&info->dev->persist->pdev->dev,
&info->port_mtu_attr);
+#ifdef CONFIG_RFS_ACCEL
+ free_irq_cpu_rmap(info->rmap);
+ info->rmap = NULL;
+#endif
}
static int mlx4_init_steering(struct mlx4_dev *dev)
@@ -2749,6 +2824,7 @@ disable_sriov:
free_mem:
dev->persist->num_vfs = 0;
kfree(dev->dev_vfs);
+ dev->dev_vfs = NULL;
return dev_flags & ~MLX4_FLAG_MASTER;
}
@@ -2900,6 +2976,7 @@ slave_start:
existing_vfs,
reset_flow);
+ mlx4_close_fw(dev);
mlx4_cmd_cleanup(dev, MLX4_CMD_CLEANUP_ALL);
dev->flags = dev_flags;
if (!SRIOV_VALID_STATE(dev->flags)) {
@@ -3024,7 +3101,7 @@ slave_start:
if (err)
goto err_master_mfunc;
- priv->msix_ctl.pool_bm = 0;
+ bitmap_zero(priv->msix_ctl.pool_bm, MAX_MSIX);
mutex_init(&priv->msix_ctl.pool_lock);
mlx4_enable_msi_x(dev);
@@ -3046,7 +3123,6 @@ slave_start:
!mlx4_is_mfunc(dev)) {
dev->flags &= ~MLX4_FLAG_MSI_X;
dev->caps.num_comp_vectors = 1;
- dev->caps.comp_pool = 0;
pci_disable_msix(pdev);
err = mlx4_setup_hca(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 502d3dd..f424900 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -287,6 +287,12 @@ struct mlx4_icm_table {
#define MLX4_CQE_SIZE_MASK_STRIDE 0x3
#define MLX4_EQE_SIZE_MASK_STRIDE 0x30
+#define MLX4_EQ_ASYNC 0
+#define MLX4_EQ_TO_CQ_VECTOR(vector) ((vector) - \
+ !!((int)(vector) >= MLX4_EQ_ASYNC))
+#define MLX4_CQ_TO_EQ_VECTOR(vector) ((vector) + \
+ !!((int)(vector) >= MLX4_EQ_ASYNC))
+
/*
* Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
*/
@@ -391,6 +397,9 @@ struct mlx4_eq {
struct mlx4_buf_list *page_list;
struct mlx4_mtt mtt;
struct mlx4_eq_tasklet tasklet_ctx;
+ struct mlx4_active_ports actv_ports;
+ u32 ref_count;
+ cpumask_var_t affinity_mask;
};
struct mlx4_slave_eqe {
@@ -808,6 +817,7 @@ struct mlx4_port_info {
struct mlx4_vlan_table vlan_table;
struct mlx4_roce_gid_table gid_table;
int base_qpn;
+ struct cpu_rmap *rmap;
};
struct mlx4_sense {
@@ -818,7 +828,7 @@ struct mlx4_sense {
};
struct mlx4_msix_ctl {
- u64 pool_bm;
+ DECLARE_BITMAP(pool_bm, MAX_MSIX);
struct mutex pool_lock;
};
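
The two macros encode that EQ 0 is permanently the async EQ, so completion ("CQ") vectors are numbered as if it were absent: CQ vector v lives at EQ index v + 1, and the pair round-trips for any non-negative vector. A quick compilable check of that property, with the macro bodies copied from the hunk above:

#include <assert.h>

#define DEMO_EQ_ASYNC 0
#define DEMO_EQ_TO_CQ(v) ((v) - !!((int)(v) >= DEMO_EQ_ASYNC))
#define DEMO_CQ_TO_EQ(v) ((v) + !!((int)(v) >= DEMO_EQ_ASYNC))

int main(void)
{
	int cq;

	assert(DEMO_CQ_TO_EQ(0) == 1);	/* CQ vector 0 -> EQ index 1 */
	for (cq = 0; cq < 64; cq++)
		assert(DEMO_EQ_TO_CQ(DEMO_CQ_TO_EQ(cq)) == cq);
	return 0;
}
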
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index d021f07..edd8fd6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -338,7 +338,7 @@ struct mlx4_en_cq {
struct napi_struct napi;
int size;
int buf_size;
- unsigned vector;
+ int vector;
enum cq_type is_tx;
u16 moder_time;
u16 moder_cnt;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 15ec081..ab48386 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -3973,6 +3973,22 @@ static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
return 0;
}
+static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
+ struct _rule_hw *eth_header)
+{
+ if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
+ is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
+ struct mlx4_net_trans_rule_hw_eth *eth =
+ (struct mlx4_net_trans_rule_hw_eth *)eth_header;
+ struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
+ bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
+ next_rule->rsvd == 0;
+
+ if (last_rule)
+ ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
+ }
+}
+
/*
* In case of missing eth header, append eth header with a MAC address
* assigned to the VF.
@@ -4125,6 +4141,12 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
rule_header = (struct _rule_hw *)(ctrl + 1);
header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
+ if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
+ handle_eth_header_mcast_prio(ctrl, rule_header);
+
+ if (slave == dev->caps.function)
+ goto execute;
+
switch (header_id) {
case MLX4_NET_TRANS_RULE_ID_ETH:
if (validate_eth_header_mac(slave, rule_header, rlist)) {
@@ -4151,6 +4173,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
goto err_put;
}
+execute:
err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
vhcr->in_modifier, 0,
MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 8ff57e8..0d7aef0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -3,6 +3,18 @@
#
config MLX5_CORE
- tristate
+ tristate "Mellanox Technologies ConnectX-4 and Connect-IB core driver"
depends on PCI
default n
+ ---help---
+ Core driver for low level functionality of the ConnectX-4 and
+ Connect-IB cards by Mellanox Technologies.
+
+config MLX5_CORE_EN
+ bool "Mellanox Technologies ConnectX-4 Ethernet support"
+ depends on MLX5_INFINIBAND=n && NETDEVICES && ETHERNET && PCI && MLX5_CORE
+ default n
+ ---help---
+ Ethernet support in Mellanox Technologies ConnectX-4 NIC.
+ Ethernet and InfiniBand support in ConnectX-4 are currently mutually
+ exclusive.
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index 105780b..87e9e60 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -3,3 +3,6 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o
mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
mad.o
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o flow_table.o vport.o transobj.o \
+ en_main.o en_flow_table.o en_ethtool.o en_tx.o en_rx.o \
+ en_txrx.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index ac0f7bf..0715b49 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -42,95 +42,36 @@
#include "mlx5_core.h"
/* Handling for queue buffers -- we allocate a bunch of memory and
- * register it in a memory region at HCA virtual address 0. If the
- * requested size is > max_direct, we split the allocation into
- * multiple pages, so we don't require too much contiguous memory.
+ * register it in a memory region at HCA virtual address 0.
*/
-int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
- struct mlx5_buf *buf)
+int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, struct mlx5_buf *buf)
{
dma_addr_t t;
buf->size = size;
- if (size <= max_direct) {
- buf->nbufs = 1;
- buf->npages = 1;
- buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
- buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
- size, &t, GFP_KERNEL);
- if (!buf->direct.buf)
- return -ENOMEM;
-
- buf->direct.map = t;
-
- while (t & ((1 << buf->page_shift) - 1)) {
- --buf->page_shift;
- buf->npages *= 2;
- }
- } else {
- int i;
-
- buf->direct.buf = NULL;
- buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
- buf->npages = buf->nbufs;
- buf->page_shift = PAGE_SHIFT;
- buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list),
- GFP_KERNEL);
- if (!buf->page_list)
- return -ENOMEM;
-
- for (i = 0; i < buf->nbufs; i++) {
- buf->page_list[i].buf =
- dma_zalloc_coherent(&dev->pdev->dev, PAGE_SIZE,
- &t, GFP_KERNEL);
- if (!buf->page_list[i].buf)
- goto err_free;
-
- buf->page_list[i].map = t;
- }
-
- if (BITS_PER_LONG == 64) {
- struct page **pages;
- pages = kmalloc(sizeof(*pages) * buf->nbufs, GFP_KERNEL);
- if (!pages)
- goto err_free;
- for (i = 0; i < buf->nbufs; i++)
- pages[i] = virt_to_page(buf->page_list[i].buf);
- buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL);
- kfree(pages);
- if (!buf->direct.buf)
- goto err_free;
- }
- }
+ buf->npages = 1;
+ buf->page_shift = (u8)get_order(size) + PAGE_SHIFT;
+ buf->direct.buf = dma_zalloc_coherent(&dev->pdev->dev,
+ size, &t, GFP_KERNEL);
+ if (!buf->direct.buf)
+ return -ENOMEM;
- return 0;
+ buf->direct.map = t;
-err_free:
- mlx5_buf_free(dev, buf);
+ while (t & ((1 << buf->page_shift) - 1)) {
+ --buf->page_shift;
+ buf->npages *= 2;
+ }
- return -ENOMEM;
+ return 0;
}
EXPORT_SYMBOL_GPL(mlx5_buf_alloc);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_buf *buf)
{
- int i;
-
- if (buf->nbufs == 1)
- dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
- buf->direct.map);
- else {
- if (BITS_PER_LONG == 64)
- vunmap(buf->direct.buf);
-
- for (i = 0; i < buf->nbufs; i++)
- if (buf->page_list[i].buf)
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
- buf->page_list[i].buf,
- buf->page_list[i].map);
- kfree(buf->page_list);
- }
+ dma_free_coherent(&dev->pdev->dev, buf->size, buf->direct.buf,
+ buf->direct.map);
}
EXPORT_SYMBOL_GPL(mlx5_buf_free);
@@ -230,10 +171,7 @@ void mlx5_fill_page_array(struct mlx5_buf *buf, __be64 *pas)
int i;
for (i = 0; i < buf->npages; i++) {
- if (buf->nbufs == 1)
- addr = buf->direct.map + (i << buf->page_shift);
- else
- addr = buf->page_list[i].map;
+ addr = buf->direct.map + (i << buf->page_shift);
pas[i] = cpu_to_be64(addr);
}
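
With the multi-page fallback removed, mlx5_buf_alloc() always gets one physically contiguous region and then only has to reconcile the page size it reports with the address it was handed: shrink page_shift until the DMA address is aligned to 1 << page_shift, doubling npages at each step so the same bytes stay covered. A compilable model of that fix-up loop:

#include <stdint.h>
#include <stdio.h>

static void demo_fixup(uint64_t dma, unsigned int *page_shift, int *npages)
{
	while (dma & ((1ull << *page_shift) - 1)) {
		--*page_shift;	/* halve the page size ... */
		*npages *= 2;	/* ... and double the page count */
	}
}

int main(void)
{
	unsigned int shift = 16;	/* 64 KB buffer, order-based shift */
	int npages = 1;

	demo_fixup(0x2000, &shift, &npages);	/* address aligned to 8 KB */
	printf("shift=%u npages=%d\n", shift, npages);	/* shift=13 npages=8 */
	return 0;
}
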
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index e3273fa..75ff58d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -75,25 +75,6 @@ enum {
MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR = 0x10,
};
-enum {
- MLX5_CMD_STAT_OK = 0x0,
- MLX5_CMD_STAT_INT_ERR = 0x1,
- MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
- MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
- MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
- MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
- MLX5_CMD_STAT_RES_BUSY = 0x6,
- MLX5_CMD_STAT_LIM_ERR = 0x8,
- MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
- MLX5_CMD_STAT_IX_ERR = 0xa,
- MLX5_CMD_STAT_NO_RES_ERR = 0xf,
- MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
- MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
- MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
- MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
- MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,
-};
-
static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
struct mlx5_cmd_msg *in,
struct mlx5_cmd_msg *out,
@@ -390,8 +371,17 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_ARM_RQ:
return "ARM_RQ";
- case MLX5_CMD_OP_RESIZE_SRQ:
- return "RESIZE_SRQ";
+ case MLX5_CMD_OP_CREATE_XRC_SRQ:
+ return "CREATE_XRC_SRQ";
+
+ case MLX5_CMD_OP_DESTROY_XRC_SRQ:
+ return "DESTROY_XRC_SRQ";
+
+ case MLX5_CMD_OP_QUERY_XRC_SRQ:
+ return "QUERY_XRC_SRQ";
+
+ case MLX5_CMD_OP_ARM_XRC_SRQ:
+ return "ARM_XRC_SRQ";
case MLX5_CMD_OP_ALLOC_PD:
return "ALLOC_PD";
@@ -408,8 +398,8 @@ const char *mlx5_command_str(int command)
case MLX5_CMD_OP_ATTACH_TO_MCG:
return "ATTACH_TO_MCG";
- case MLX5_CMD_OP_DETACH_FROM_MCG:
- return "DETACH_FROM_MCG";
+ case MLX5_CMD_OP_DETTACH_FROM_MCG:
+ return "DETTACH_FROM_MCG";
case MLX5_CMD_OP_ALLOC_XRCD:
return "ALLOC_XRCD";
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
index eb0cf81..04ab7e4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c
@@ -219,6 +219,24 @@ int mlx5_core_modify_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq,
}
EXPORT_SYMBOL(mlx5_core_modify_cq);
+int mlx5_core_modify_cq_moderation(struct mlx5_core_dev *dev,
+ struct mlx5_core_cq *cq,
+ u16 cq_period,
+ u16 cq_max_count)
+{
+ struct mlx5_modify_cq_mbox_in in;
+
+ memset(&in, 0, sizeof(in));
+
+ in.cqn = cpu_to_be32(cq->cqn);
+ in.ctx.cq_period = cpu_to_be16(cq_period);
+ in.ctx.cq_max_count = cpu_to_be16(cq_max_count);
+ in.field_select = cpu_to_be32(MLX5_CQ_MODIFY_PERIOD |
+ MLX5_CQ_MODIFY_COUNT);
+
+ return mlx5_core_modify_cq(dev, cq, &in, sizeof(in));
+}
+
int mlx5_init_cq_table(struct mlx5_core_dev *dev)
{
struct mlx5_cq_table *table = &dev->priv.cq_table;
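
mlx5_core_modify_cq_moderation() wraps MODIFY_CQ with only the period/count fields selected, giving callers a one-line way to retune event coalescing on a live CQ. A hedged usage sketch, assuming the helper is reachable through the mlx5 cq header; the 0x10 usec / 0x20 CQE values match the defaults the new en driver declares later in this series:

#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>

static int demo_set_rx_coalesce(struct mlx5_core_dev *mdev,
				struct mlx5_core_cq *cq)
{
	/* fire a completion event after 0x10 usec or 0x20 CQEs */
	return mlx5_core_modify_cq_moderation(mdev, cq, 0x10, 0x20);
}
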
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
new file mode 100644
index 0000000..cbb3c7c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -0,0 +1,520 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/if_vlan.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/qp.h>
+#include <linux/mlx5/cq.h>
+#include "vport.h"
+#include "wq.h"
+#include "transobj.h"
+#include "mlx5_core.h"
+
+#define MLX5E_MAX_NUM_TC 8
+
+#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE 0xd
+
+#define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE 0x7
+#define MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE 0xa
+#define MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE 0xd
+
+#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (16 * 1024)
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS 0x20
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC 0x10
+#define MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS 0x20
+#define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80
+#define MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ 0x7
+#define MLX5E_PARAMS_MIN_MTU 46
+
+#define MLX5E_TX_CQ_POLL_BUDGET 128
+#define MLX5E_UPDATE_STATS_INTERVAL 200 /* msecs */
+
+static const char vport_strings[][ETH_GSTRING_LEN] = {
+ /* vport statistics */
+ "rx_packets",
+ "rx_bytes",
+ "tx_packets",
+ "tx_bytes",
+ "rx_error_packets",
+ "rx_error_bytes",
+ "tx_error_packets",
+ "tx_error_bytes",
+ "rx_unicast_packets",
+ "rx_unicast_bytes",
+ "tx_unicast_packets",
+ "tx_unicast_bytes",
+ "rx_multicast_packets",
+ "rx_multicast_bytes",
+ "tx_multicast_packets",
+ "tx_multicast_bytes",
+ "rx_broadcast_packets",
+ "rx_broadcast_bytes",
+ "tx_broadcast_packets",
+ "tx_broadcast_bytes",
+
+ /* SW counters */
+ "tso_packets",
+ "tso_bytes",
+ "lro_packets",
+ "lro_bytes",
+ "rx_csum_good",
+ "rx_csum_none",
+ "tx_csum_offload",
+ "tx_queue_stopped",
+ "tx_queue_wake",
+ "tx_queue_dropped",
+ "rx_wqe_err",
+};
+
+struct mlx5e_vport_stats {
+ /* HW counters */
+ u64 rx_packets;
+ u64 rx_bytes;
+ u64 tx_packets;
+ u64 tx_bytes;
+ u64 rx_error_packets;
+ u64 rx_error_bytes;
+ u64 tx_error_packets;
+ u64 tx_error_bytes;
+ u64 rx_unicast_packets;
+ u64 rx_unicast_bytes;
+ u64 tx_unicast_packets;
+ u64 tx_unicast_bytes;
+ u64 rx_multicast_packets;
+ u64 rx_multicast_bytes;
+ u64 tx_multicast_packets;
+ u64 tx_multicast_bytes;
+ u64 rx_broadcast_packets;
+ u64 rx_broadcast_bytes;
+ u64 tx_broadcast_packets;
+ u64 tx_broadcast_bytes;
+
+ /* SW counters */
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 lro_packets;
+ u64 lro_bytes;
+ u64 rx_csum_good;
+ u64 rx_csum_none;
+ u64 tx_csum_offload;
+ u64 tx_queue_stopped;
+ u64 tx_queue_wake;
+ u64 tx_queue_dropped;
+ u64 rx_wqe_err;
+
+#define NUM_VPORT_COUNTERS 31
+};
+
+static const char rq_stats_strings[][ETH_GSTRING_LEN] = {
+ "packets",
+ "csum_none",
+ "lro_packets",
+ "lro_bytes",
+ "wqe_err"
+};
+
+struct mlx5e_rq_stats {
+ u64 packets;
+ u64 csum_none;
+ u64 lro_packets;
+ u64 lro_bytes;
+ u64 wqe_err;
+#define NUM_RQ_STATS 5
+};
+
+static const char sq_stats_strings[][ETH_GSTRING_LEN] = {
+ "packets",
+ "tso_packets",
+ "tso_bytes",
+ "csum_offload_none",
+ "stopped",
+ "wake",
+ "dropped",
+ "nop"
+};
+
+struct mlx5e_sq_stats {
+ u64 packets;
+ u64 tso_packets;
+ u64 tso_bytes;
+ u64 csum_offload_none;
+ u64 stopped;
+ u64 wake;
+ u64 dropped;
+ u64 nop;
+#define NUM_SQ_STATS 8
+};
+
+struct mlx5e_stats {
+ struct mlx5e_vport_stats vport;
+};
+
+struct mlx5e_params {
+ u8 log_sq_size;
+ u8 log_rq_size;
+ u16 num_channels;
+ u8 default_vlan_prio;
+ u8 num_tc;
+ u16 rx_cq_moderation_usec;
+ u16 rx_cq_moderation_pkts;
+ u16 tx_cq_moderation_usec;
+ u16 tx_cq_moderation_pkts;
+ u16 min_rx_wqes;
+ u16 rx_hash_log_tbl_sz;
+ bool lro_en;
+ u32 lro_wqe_sz;
+};
+
+enum {
+ MLX5E_RQ_STATE_POST_WQES_ENABLE,
+};
+
+enum cq_flags {
+ MLX5E_CQ_HAS_CQES = 1,
+};
+
+struct mlx5e_cq {
+ /* data path - accessed per cqe */
+ struct mlx5_cqwq wq;
+ void *sqrq;
+ unsigned long flags;
+
+ /* data path - accessed per napi poll */
+ struct napi_struct *napi;
+ struct mlx5_core_cq mcq;
+ struct mlx5e_channel *channel;
+
+ /* control */
+ struct mlx5_wq_ctrl wq_ctrl;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_rq {
+ /* data path */
+ struct mlx5_wq_ll wq;
+ u32 wqe_sz;
+ struct sk_buff **skb;
+
+ struct device *pdev;
+ struct net_device *netdev;
+ struct mlx5e_rq_stats stats;
+ struct mlx5e_cq cq;
+
+ unsigned long state;
+ int ix;
+
+ /* control */
+ struct mlx5_wq_ctrl wq_ctrl;
+ u32 rqn;
+ struct mlx5e_channel *channel;
+} ____cacheline_aligned_in_smp;
+
+struct mlx5e_tx_skb_cb {
+ u32 num_bytes;
+ u8 num_wqebbs;
+ u8 num_dma;
+};
+
+#define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
+
+struct mlx5e_sq_dma {
+ dma_addr_t addr;
+ u32 size;
+};
+
+enum {
+ MLX5E_SQ_STATE_WAKE_TXQ_ENABLE,
+};
+
+struct mlx5e_sq {
+ /* data path */
+
+ /* dirtied @completion */
+ u16 cc;
+ u32 dma_fifo_cc;
+
+ /* dirtied @xmit */
+ u16 pc ____cacheline_aligned_in_smp;
+ u32 dma_fifo_pc;
+ u32 bf_offset;
+ struct mlx5e_sq_stats stats;
+
+ struct mlx5e_cq cq;
+
+ /* pointers to per packet info: write@xmit, read@completion */
+ struct sk_buff **skb;
+ struct mlx5e_sq_dma *dma_fifo;
+
+ /* read only */
+ struct mlx5_wq_cyc wq;
+ u32 dma_fifo_mask;
+ void __iomem *uar_map;
+ struct netdev_queue *txq;
+ u32 sqn;
+ u32 bf_buf_size;
+ struct device *pdev;
+ __be32 mkey_be;
+ unsigned long state;
+
+ /* control path */
+ struct mlx5_wq_ctrl wq_ctrl;
+ struct mlx5_uar uar;
+ struct mlx5e_channel *channel;
+ int tc;
+} ____cacheline_aligned_in_smp;
+
+static inline bool mlx5e_sq_has_room_for(struct mlx5e_sq *sq, u16 n)
+{
+ return (((sq->wq.sz_m1 & (sq->cc - sq->pc)) >= n) ||
+ (sq->cc == sq->pc));
+}
+
+enum channel_flags {
+ MLX5E_CHANNEL_NAPI_SCHED = 1,
+};
+
+struct mlx5e_channel {
+ /* data path */
+ struct mlx5e_rq rq;
+ struct mlx5e_sq sq[MLX5E_MAX_NUM_TC];
+ struct napi_struct napi;
+ struct device *pdev;
+ struct net_device *netdev;
+ __be32 mkey_be;
+ u8 num_tc;
+ unsigned long flags;
+
+ /* control */
+ struct mlx5e_priv *priv;
+ int ix;
+ int cpu;
+};
+
+enum mlx5e_traffic_types {
+ MLX5E_TT_IPV4_TCP = 0,
+ MLX5E_TT_IPV6_TCP = 1,
+ MLX5E_TT_IPV4_UDP = 2,
+ MLX5E_TT_IPV6_UDP = 3,
+ MLX5E_TT_IPV4 = 4,
+ MLX5E_TT_IPV6 = 5,
+ MLX5E_TT_ANY = 6,
+ MLX5E_NUM_TT = 7,
+};
+
+enum {
+ MLX5E_RQT_SPREADING = 0,
+ MLX5E_RQT_DEFAULT_RQ = 1,
+ MLX5E_NUM_RQT = 2,
+};
+
+struct mlx5e_eth_addr_info {
+ u8 addr[ETH_ALEN + 2];
+ u32 tt_vec;
+ u32 ft_ix[MLX5E_NUM_TT]; /* flow table index per traffic type */
+};
+
+#define MLX5E_ETH_ADDR_HASH_SIZE (1 << BITS_PER_BYTE)
+
+struct mlx5e_eth_addr_db {
+ struct hlist_head netdev_uc[MLX5E_ETH_ADDR_HASH_SIZE];
+ struct hlist_head netdev_mc[MLX5E_ETH_ADDR_HASH_SIZE];
+ struct mlx5e_eth_addr_info broadcast;
+ struct mlx5e_eth_addr_info allmulti;
+ struct mlx5e_eth_addr_info promisc;
+ bool broadcast_enabled;
+ bool allmulti_enabled;
+ bool promisc_enabled;
+};
+
+enum {
+ MLX5E_STATE_ASYNC_EVENTS_ENABLE,
+ MLX5E_STATE_OPENED,
+};
+
+struct mlx5e_vlan_db {
+ unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+ u32 active_vlans_ft_ix[VLAN_N_VID];
+ u32 untagged_rule_ft_ix;
+ u32 any_vlan_rule_ft_ix;
+ bool filter_disabled;
+};
+
+struct mlx5e_flow_table {
+ void *vlan;
+ void *main;
+};
+
+struct mlx5e_priv {
+ /* priv data path fields - start */
+ int order_base_2_num_channels;
+ int queue_mapping_channel_mask;
+ int num_tc;
+ int default_vlan_prio;
+ /* priv data path fields - end */
+
+ unsigned long state;
+ struct mutex state_lock; /* Protects Interface state */
+ struct mlx5_uar cq_uar;
+ u32 pdn;
+ struct mlx5_core_mr mr;
+
+ struct mlx5e_channel **channel;
+ u32 tisn[MLX5E_MAX_NUM_TC];
+ u32 rqtn;
+ u32 tirn[MLX5E_NUM_TT];
+
+ struct mlx5e_flow_table ft;
+ struct mlx5e_eth_addr_db eth_addr;
+ struct mlx5e_vlan_db vlan;
+
+ struct mlx5e_params params;
+ spinlock_t async_events_spinlock; /* sync hw events */
+ struct work_struct update_carrier_work;
+ struct work_struct set_rx_mode_work;
+ struct delayed_work update_stats_work;
+
+ struct mlx5_core_dev *mdev;
+ struct net_device *netdev;
+ struct mlx5e_stats stats;
+};
+
+#define MLX5E_NET_IP_ALIGN 2
+
+struct mlx5e_tx_wqe {
+ struct mlx5_wqe_ctrl_seg ctrl;
+ struct mlx5_wqe_eth_seg eth;
+};
+
+struct mlx5e_rx_wqe {
+ struct mlx5_wqe_srq_next_seg next;
+ struct mlx5_wqe_data_seg data;
+};
+
+enum mlx5e_link_mode {
+ MLX5E_1000BASE_CX_SGMII = 0,
+ MLX5E_1000BASE_KX = 1,
+ MLX5E_10GBASE_CX4 = 2,
+ MLX5E_10GBASE_KX4 = 3,
+ MLX5E_10GBASE_KR = 4,
+ MLX5E_20GBASE_KR2 = 5,
+ MLX5E_40GBASE_CR4 = 6,
+ MLX5E_40GBASE_KR4 = 7,
+ MLX5E_56GBASE_R4 = 8,
+ MLX5E_10GBASE_CR = 12,
+ MLX5E_10GBASE_SR = 13,
+ MLX5E_10GBASE_ER = 14,
+ MLX5E_40GBASE_SR4 = 15,
+ MLX5E_40GBASE_LR4 = 16,
+ MLX5E_100GBASE_CR4 = 20,
+ MLX5E_100GBASE_SR4 = 21,
+ MLX5E_100GBASE_KR4 = 22,
+ MLX5E_100GBASE_LR4 = 23,
+ MLX5E_100BASE_TX = 24,
+ MLX5E_100BASE_T = 25,
+ MLX5E_10GBASE_T = 26,
+ MLX5E_25GBASE_CR = 27,
+ MLX5E_25GBASE_KR = 28,
+ MLX5E_25GBASE_SR = 29,
+ MLX5E_50GBASE_CR2 = 30,
+ MLX5E_50GBASE_KR2 = 31,
+ MLX5E_LINK_MODES_NUMBER,
+};
+
+#define MLX5E_PROT_MASK(link_mode) (1 << link_mode)
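+
+/* Illustrative: PTYS protocol fields are bitmaps indexed by the enum
+ * above, e.g. MLX5E_PROT_MASK(MLX5E_40GBASE_CR4) == 1 << 6 == 0x40.
+ */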
+
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback);
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
+netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev);
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq);
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event);
+int mlx5e_napi_poll(struct napi_struct *napi, int budget);
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq);
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget);
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq);
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq);
+
+void mlx5e_update_stats(struct mlx5e_priv *priv);
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv);
+void mlx5e_close_flow_table(struct mlx5e_priv *priv);
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv);
+void mlx5e_set_rx_mode_work(struct work_struct *work);
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid);
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid);
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv);
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv);
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv);
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv);
+
+int mlx5e_open_locked(struct net_device *netdev);
+int mlx5e_close_locked(struct net_device *netdev);
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params);
+
+static inline void mlx5e_tx_notify_hw(struct mlx5e_sq *sq,
+ struct mlx5e_tx_wqe *wqe)
+{
+ /* ensure wqe is visible to device before updating doorbell record */
+ dma_wmb();
+
+ *sq->wq.db = cpu_to_be32(sq->pc);
+
+ /* ensure doorbell record is visible to device before ringing the
+ * doorbell
+ */
+ wmb();
+
+ mlx5_write64((__be32 *)&wqe->ctrl,
+ sq->uar_map + MLX5_BF_OFFSET + sq->bf_offset,
+ NULL);
+
+ sq->bf_offset ^= sq->bf_buf_size;
+}
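+
+/* Illustrative: the closing XOR double-buffers the BlueFlame area.
+ * Assuming bf_buf_size = 256, successive doorbells alternate between
+ * offsets 0 and 256 within it, so a new write never lands in the half
+ * the device may still be consuming.
+ */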
+
+static inline void mlx5e_cq_arm(struct mlx5e_cq *cq)
+{
+ struct mlx5_core_cq *mcq;
+
+ mcq = &cq->mcq;
+ mlx5_cq_arm(mcq, MLX5_CQ_DB_REQ_NOT, mcq->uar->map, NULL, cq->wq.cc);
+}
+
+extern const struct ethtool_ops mlx5e_ethtool_ops;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
new file mode 100644
index 0000000..de7aec8
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+static void mlx5e_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ strlcpy(drvinfo->driver, DRIVER_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, DRIVER_VERSION " (" DRIVER_RELDATE ")",
+ sizeof(drvinfo->version));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%d.%d.%d",
+ fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev));
+ strlcpy(drvinfo->bus_info, pci_name(mdev->pdev),
+ sizeof(drvinfo->bus_info));
+}
+
+static const struct {
+ u32 supported;
+ u32 advertised;
+ u32 speed;
+} ptys2ethtool_table[MLX5E_LINK_MODES_NUMBER] = {
+ [MLX5E_1000BASE_CX_SGMII] = {
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ [MLX5E_1000BASE_KX] = {
+ .supported = SUPPORTED_1000baseKX_Full,
+ .advertised = ADVERTISED_1000baseKX_Full,
+ .speed = 1000,
+ },
+ [MLX5E_10GBASE_CX4] = {
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_KX4] = {
+ .supported = SUPPORTED_10000baseKX4_Full,
+ .advertised = ADVERTISED_10000baseKX4_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_KR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_20GBASE_KR2] = {
+ .supported = SUPPORTED_20000baseKR2_Full,
+ .advertised = ADVERTISED_20000baseKR2_Full,
+ .speed = 20000,
+ },
+ [MLX5E_40GBASE_CR4] = {
+ .supported = SUPPORTED_40000baseCR4_Full,
+ .advertised = ADVERTISED_40000baseCR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_40GBASE_KR4] = {
+ .supported = SUPPORTED_40000baseKR4_Full,
+ .advertised = ADVERTISED_40000baseKR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_56GBASE_R4] = {
+ .supported = SUPPORTED_56000baseKR4_Full,
+ .advertised = ADVERTISED_56000baseKR4_Full,
+ .speed = 56000,
+ },
+ [MLX5E_10GBASE_CR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_SR] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_10GBASE_ER] = {
+ .supported = SUPPORTED_10000baseKR_Full,
+ .advertised = ADVERTISED_10000baseKR_Full,
+ .speed = 10000,
+ },
+ [MLX5E_40GBASE_SR4] = {
+ .supported = SUPPORTED_40000baseSR4_Full,
+ .advertised = ADVERTISED_40000baseSR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_40GBASE_LR4] = {
+ .supported = SUPPORTED_40000baseLR4_Full,
+ .advertised = ADVERTISED_40000baseLR4_Full,
+ .speed = 40000,
+ },
+ [MLX5E_100GBASE_CR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_SR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_KR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100GBASE_LR4] = {
+ .speed = 100000,
+ },
+ [MLX5E_100BASE_TX] = {
+ .speed = 100,
+ },
+ [MLX5E_100BASE_T] = {
+ .supported = SUPPORTED_100baseT_Full,
+ .advertised = ADVERTISED_100baseT_Full,
+ .speed = 100,
+ },
+ [MLX5E_10GBASE_T] = {
+ .supported = SUPPORTED_10000baseT_Full,
+ .advertised = ADVERTISED_10000baseT_Full,
+ .speed = 10000,
+ },
+ [MLX5E_25GBASE_CR] = {
+ .speed = 25000,
+ },
+ [MLX5E_25GBASE_KR] = {
+ .speed = 25000,
+ },
+ [MLX5E_25GBASE_SR] = {
+ .speed = 25000,
+ },
+ [MLX5E_50GBASE_CR2] = {
+ .speed = 50000,
+ },
+ [MLX5E_50GBASE_KR2] = {
+ .speed = 50000,
+ },
+};
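+
+/* Note: rows that set only .speed (the 25G/50G/100G modes and 100BASE-TX)
+ * have no counterpart in the legacy 32-bit SUPPORTED_ / ADVERTISED_
+ * ethtool masks, so they contribute speed reporting only.
+ */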
+
+static int mlx5e_get_sset_count(struct net_device *dev, int sset)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (sset) {
+ case ETH_SS_STATS:
+ return NUM_VPORT_COUNTERS +
+ priv->params.num_channels * NUM_RQ_STATS +
+ priv->params.num_channels * priv->num_tc *
+ NUM_SQ_STATS;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void mlx5e_get_strings(struct net_device *dev,
+ uint32_t stringset, uint8_t *data)
+{
+ int i, j, tc, idx = 0;
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ switch (stringset) {
+ case ETH_SS_PRIV_FLAGS:
+ break;
+
+ case ETH_SS_TEST:
+ break;
+
+ case ETH_SS_STATS:
+ /* VPORT counters */
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ vport_strings[i]);
+
+ /* per channel counters */
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ sprintf(data + (idx++) * ETH_GSTRING_LEN,
+ "rx%d_%s", i, rq_stats_strings[j]);
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->num_tc; tc++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ sprintf(data +
+ (idx++) * ETH_GSTRING_LEN,
+ "tx%d_%d_%s", i, tc,
+ sq_stats_strings[j]);
+ break;
+ }
+}
+
+static void mlx5e_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int i, j, tc, idx = 0;
+
+ if (!data)
+ return;
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_stats(priv);
+ mutex_unlock(&priv->state_lock);
+
+ for (i = 0; i < NUM_VPORT_COUNTERS; i++)
+ data[idx++] = ((u64 *)&priv->stats.vport)[i];
+
+ /* per channel counters */
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (j = 0; j < NUM_RQ_STATS; j++)
+ data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+ &priv->state) ? 0 :
+ ((u64 *)&priv->channel[i]->rq.stats)[j];
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ for (tc = 0; tc < priv->num_tc; tc++)
+ for (j = 0; j < NUM_SQ_STATS; j++)
+ data[idx++] = !test_bit(MLX5E_STATE_OPENED,
+ &priv->state) ? 0 :
+ ((u64 *)&priv->channel[i]->sq[tc].stats)[j];
+}
+
+static void mlx5e_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ param->rx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE;
+ param->tx_max_pending = 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE;
+ param->rx_pending = 1 << priv->params.log_rq_size;
+ param->tx_pending = 1 << priv->params.log_sq_size;
+}
+
+static int mlx5e_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *param)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_params new_params;
+ u16 min_rx_wqes;
+ u8 log_rq_size;
+ u8 log_sq_size;
+ int err = 0;
+
+ if (param->rx_jumbo_pending) {
+ netdev_info(dev, "%s: rx_jumbo_pending not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (param->rx_mini_pending) {
+ netdev_info(dev, "%s: rx_mini_pending not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (param->rx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE)) {
+ netdev_info(dev, "%s: rx_pending (%d) < min (%d)\n",
+ __func__, param->rx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->rx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE)) {
+ netdev_info(dev, "%s: rx_pending (%d) > max (%d)\n",
+ __func__, param->rx_pending,
+ 1 << MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending < (1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE)) {
+ netdev_info(dev, "%s: tx_pending (%d) < min (%d)\n",
+ __func__, param->tx_pending,
+ 1 << MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE);
+ return -EINVAL;
+ }
+ if (param->tx_pending > (1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE)) {
+ netdev_info(dev, "%s: tx_pending (%d) > max (%d)\n",
+ __func__, param->tx_pending,
+ 1 << MLX5E_PARAMS_MAXIMUM_LOG_SQ_SIZE);
+ return -EINVAL;
+ }
+
+ log_rq_size = order_base_2(param->rx_pending);
+ log_sq_size = order_base_2(param->tx_pending);
+ min_rx_wqes = min_t(u16, param->rx_pending - 1,
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES);
+
+ if (log_rq_size == priv->params.log_rq_size &&
+ log_sq_size == priv->params.log_sq_size &&
+ min_rx_wqes == priv->params.min_rx_wqes)
+ return 0;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+ new_params.log_rq_size = log_rq_size;
+ new_params.log_sq_size = log_sq_size;
+ new_params.min_rx_wqes = min_rx_wqes;
+ err = mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static void mlx5e_get_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+
+ ch->max_combined = ncv;
+ ch->combined_count = priv->params.num_channels;
+}
+
+static int mlx5e_set_channels(struct net_device *dev,
+ struct ethtool_channels *ch)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ncv = priv->mdev->priv.eq_table.num_comp_vectors;
+ unsigned int count = ch->combined_count;
+ struct mlx5e_params new_params;
+ int err = 0;
+
+ if (!count) {
+ netdev_info(dev, "%s: combined_count=0 not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (ch->rx_count || ch->tx_count) {
+ netdev_info(dev, "%s: separate rx/tx count not supported\n",
+ __func__);
+ return -EINVAL;
+ }
+ if (count > ncv) {
+ netdev_info(dev, "%s: count (%d) > max (%d)\n",
+ __func__, count, ncv);
+ return -EINVAL;
+ }
+
+ if (priv->params.num_channels == count)
+ return 0;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+ new_params.num_channels = count;
+ err = mlx5e_update_priv_params(priv, &new_params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static int mlx5e_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ coal->rx_coalesce_usecs = priv->params.rx_cq_moderation_usec;
+ coal->rx_max_coalesced_frames = priv->params.rx_cq_moderation_pkts;
+ coal->tx_coalesce_usecs = priv->params.tx_cq_moderation_usec;
+ coal->tx_max_coalesced_frames = priv->params.tx_cq_moderation_pkts;
+
+ return 0;
+}
+
+static int mlx5e_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *coal)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_channel *c;
+ int tc;
+ int i;
+
+ priv->params.tx_cq_moderation_usec = coal->tx_coalesce_usecs;
+ priv->params.tx_cq_moderation_pkts = coal->tx_max_coalesced_frames;
+ priv->params.rx_cq_moderation_usec = coal->rx_coalesce_usecs;
+ priv->params.rx_cq_moderation_pkts = coal->rx_max_coalesced_frames;
+
+ /* priv->channel[] is only valid while the interface is open */
+ if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
+ return 0;
+
+ for (i = 0; i < priv->params.num_channels; ++i) {
+ c = priv->channel[i];
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ mlx5_core_modify_cq_moderation(mdev,
+ &c->sq[tc].cq.mcq,
+ coal->tx_coalesce_usecs,
+ coal->tx_max_coalesced_frames);
+ }
+
+ mlx5_core_modify_cq_moderation(mdev, &c->rq.cq.mcq,
+ coal->rx_coalesce_usecs,
+ coal->rx_max_coalesced_frames);
+ }
+
+ return 0;
+}
+
+static u32 ptys2ethtool_supported_link(u32 eth_proto_cap)
+{
+ int i;
+ u32 supported_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_cap & MLX5E_PROT_MASK(i))
+ supported_modes |= ptys2ethtool_table[i].supported;
+ }
+ return supported_modes;
+}
+
+static u32 ptys2ethtool_adver_link(u32 eth_proto_cap)
+{
+ int i;
+ u32 advertising_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_cap & MLX5E_PROT_MASK(i))
+ advertising_modes |= ptys2ethtool_table[i].advertised;
+ }
+ return advertising_modes;
+}
+
+static u32 ptys2ethtool_supported_port(u32 eth_proto_cap)
+{
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+ return SUPPORTED_FIBRE;
+ }
+
+ if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_100GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_KX))) {
+ return SUPPORTED_Backplane;
+ }
+ return 0;
+}
+
+static void get_speed_duplex(struct net_device *netdev,
+ u32 eth_proto_oper,
+ struct ethtool_cmd *cmd)
+{
+ int i;
+ u32 speed = SPEED_UNKNOWN;
+ u8 duplex = DUPLEX_UNKNOWN;
+
+ if (!netif_carrier_ok(netdev))
+ goto out;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (eth_proto_oper & MLX5E_PROT_MASK(i)) {
+ speed = ptys2ethtool_table[i].speed;
+ duplex = DUPLEX_FULL;
+ break;
+ }
+ }
+out:
+ ethtool_cmd_speed_set(cmd, speed);
+ cmd->duplex = duplex;
+}
+
+static void get_supported(u32 eth_proto_cap, u32 *supported)
+{
+ *supported |= ptys2ethtool_supported_port(eth_proto_cap);
+ *supported |= ptys2ethtool_supported_link(eth_proto_cap);
+ *supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+}
+
+static void get_advertising(u32 eth_proto_cap, u8 tx_pause,
+ u8 rx_pause, u32 *advertising)
+{
+ *advertising |= ptys2ethtool_adver_link(eth_proto_cap);
+ *advertising |= tx_pause ? ADVERTISED_Pause : 0;
+ *advertising |= (tx_pause ^ rx_pause) ? ADVERTISED_Asym_Pause : 0;
+}
+
+static u8 get_connector_port(u32 eth_proto)
+{
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_SR4)
+ | MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII))) {
+ return PORT_FIBRE;
+ }
+
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_CR4))) {
+ return PORT_DA;
+ }
+
+ if (eth_proto & (MLX5E_PROT_MASK(MLX5E_10GBASE_KX4)
+ | MLX5E_PROT_MASK(MLX5E_10GBASE_KR)
+ | MLX5E_PROT_MASK(MLX5E_40GBASE_KR4)
+ | MLX5E_PROT_MASK(MLX5E_100GBASE_KR4))) {
+ return PORT_NONE;
+ }
+
+ return PORT_OTHER;
+}
+
+static void get_lp_advertising(u32 eth_proto_lp, u32 *lp_advertising)
+{
+ *lp_advertising = ptys2ethtool_adver_link(eth_proto_lp);
+}
+
+static int mlx5e_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 eth_proto_cap;
+ u32 eth_proto_admin;
+ u32 eth_proto_lp;
+ u32 eth_proto_oper;
+ int err;
+
+ err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN);
+
+ if (err) {
+ netdev_err(netdev, "%s: query port ptys failed: %d\n",
+ __func__, err);
+ goto err_query_ptys;
+ }
+
+ eth_proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ eth_proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ eth_proto_oper = MLX5_GET(ptys_reg, out, eth_proto_oper);
+ eth_proto_lp = MLX5_GET(ptys_reg, out, eth_proto_lp_advertise);
+
+ cmd->supported = 0;
+ cmd->advertising = 0;
+
+ get_supported(eth_proto_cap, &cmd->supported);
+ get_advertising(eth_proto_admin, 0, 0, &cmd->advertising);
+ get_speed_duplex(netdev, eth_proto_oper, cmd);
+
+ eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
+
+ cmd->port = get_connector_port(eth_proto_oper);
+ get_lp_advertising(eth_proto_lp, &cmd->lp_advertising);
+
+ cmd->transceiver = XCVR_INTERNAL;
+
+err_query_ptys:
+ return err;
+}
+
+static u32 mlx5e_ethtool2ptys_adver_link(u32 link_modes)
+{
+ u32 i, ptys_modes = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (ptys2ethtool_table[i].advertised & link_modes)
+ ptys_modes |= MLX5E_PROT_MASK(i);
+ }
+
+ return ptys_modes;
+}
+
+static u32 mlx5e_ethtool2ptys_speed_link(u32 speed)
+{
+ u32 i, speed_links = 0;
+
+ for (i = 0; i < MLX5E_LINK_MODES_NUMBER; ++i) {
+ if (ptys2ethtool_table[i].speed == speed)
+ speed_links |= MLX5E_PROT_MASK(i);
+ }
+
+ return speed_links;
+}
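+
+/* Illustrative: a forced speed of 40000 selects every 40G row of
+ * ptys2ethtool_table, i.e. bits 6, 7, 15 and 16 (CR4, KR4, SR4, LR4).
+ */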
+
+static int mlx5e_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 link_modes;
+ u32 speed;
+ u32 eth_proto_cap, eth_proto_admin;
+ u8 port_status;
+ int err;
+
+ speed = ethtool_cmd_speed(cmd);
+
+ link_modes = cmd->autoneg == AUTONEG_ENABLE ?
+ mlx5e_ethtool2ptys_adver_link(cmd->advertising) :
+ mlx5e_ethtool2ptys_speed_link(speed);
+
+ err = mlx5_query_port_proto_cap(mdev, &eth_proto_cap, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: query port eth proto cap failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ link_modes = link_modes & eth_proto_cap;
+ if (!link_modes) {
+ netdev_err(netdev, "%s: Not supported link mode(s) requested",
+ __func__);
+ err = -EINVAL;
+ goto out;
+ }
+
+ err = mlx5_query_port_proto_admin(mdev, &eth_proto_admin, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: query port eth proto admin failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ if (link_modes == eth_proto_admin)
+ goto out;
+
+ err = mlx5_set_port_proto(mdev, link_modes, MLX5_PTYS_EN);
+ if (err) {
+ netdev_err(netdev, "%s: set port eth proto admin failed: %d\n",
+ __func__, err);
+ goto out;
+ }
+
+ err = mlx5_query_port_status(mdev, &port_status);
+ if (err)
+ goto out;
+
+ if (port_status == MLX5_PORT_DOWN)
+ return 0;
+
+ err = mlx5_set_port_status(mdev, MLX5_PORT_DOWN);
+ if (err)
+ goto out;
+ err = mlx5_set_port_status(mdev, MLX5_PORT_UP);
+out:
+ return err;
+}
+
+const struct ethtool_ops mlx5e_ethtool_ops = {
+ .get_drvinfo = mlx5e_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_strings = mlx5e_get_strings,
+ .get_sset_count = mlx5e_get_sset_count,
+ .get_ethtool_stats = mlx5e_get_ethtool_stats,
+ .get_ringparam = mlx5e_get_ringparam,
+ .set_ringparam = mlx5e_set_ringparam,
+ .get_channels = mlx5e_get_channels,
+ .set_channels = mlx5e_set_channels,
+ .get_coalesce = mlx5e_get_coalesce,
+ .set_coalesce = mlx5e_set_coalesce,
+ .get_settings = mlx5e_get_settings,
+ .set_settings = mlx5e_set_settings,
+};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
new file mode 100644
index 0000000..6feebda
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_flow_table.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+enum {
+ MLX5E_FULLMATCH = 0,
+ MLX5E_ALLMULTI = 1,
+ MLX5E_PROMISC = 2,
+};
+
+enum {
+ MLX5E_UC = 0,
+ MLX5E_MC_IPV4 = 1,
+ MLX5E_MC_IPV6 = 2,
+ MLX5E_MC_OTHER = 3,
+};
+
+enum {
+ MLX5E_ACTION_NONE = 0,
+ MLX5E_ACTION_ADD = 1,
+ MLX5E_ACTION_DEL = 2,
+};
+
+struct mlx5e_eth_addr_hash_node {
+ struct hlist_node hlist;
+ u8 action;
+ struct mlx5e_eth_addr_info ai;
+};
+
+static inline int mlx5e_hash_eth_addr(u8 *addr)
+{
+ return addr[5];
+}
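+
+/* Illustrative: MLX5E_ETH_ADDR_HASH_SIZE is 1 << BITS_PER_BYTE = 256, so
+ * hashing on the last address octet maps each possible addr[5] value to
+ * its own bucket; e.g. 00:11:22:33:44:a7 lands in bucket 0xa7.
+ */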
+
+static void mlx5e_add_eth_addr_to_hash(struct hlist_head *hash, u8 *addr)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ int ix = mlx5e_hash_eth_addr(addr);
+ int found = 0;
+
+ hlist_for_each_entry(hn, &hash[ix], hlist)
+ if (ether_addr_equal_64bits(hn->ai.addr, addr)) {
+ found = 1;
+ break;
+ }
+
+ if (found) {
+ hn->action = MLX5E_ACTION_NONE;
+ return;
+ }
+
+ hn = kzalloc(sizeof(*hn), GFP_ATOMIC);
+ if (!hn)
+ return;
+
+ ether_addr_copy(hn->ai.addr, addr);
+ hn->action = MLX5E_ACTION_ADD;
+
+ hlist_add_head(&hn->hlist, &hash[ix]);
+}
+
+static void mlx5e_del_eth_addr_from_hash(struct mlx5e_eth_addr_hash_node *hn)
+{
+ hlist_del(&hn->hlist);
+ kfree(hn);
+}
+
+static void mlx5e_del_eth_addr_from_flow_table(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai)
+{
+ void *ft = priv->ft.main;
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6_TCP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4_TCP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6_UDP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4_UDP))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV6))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV6]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_IPV4))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_IPV4]);
+
+ if (ai->tt_vec & (1 << MLX5E_TT_ANY))
+ mlx5_del_flow_table_entry(ft, ai->ft_ix[MLX5E_TT_ANY]);
+}
+
+static int mlx5e_get_eth_addr_type(u8 *addr)
+{
+ if (is_unicast_ether_addr(addr))
+ return MLX5E_UC;
+
+ /* 01:00:5e:00:00:00 - 01:00:5e:7f:ff:ff: IPv4 multicast */
+ if ((addr[0] == 0x01) &&
+ (addr[1] == 0x00) &&
+ (addr[2] == 0x5e) &&
+ !(addr[3] & 0x80))
+ return MLX5E_MC_IPV4;
+
+ /* 33:33:xx:xx:xx:xx: IPv6 multicast */
+ if ((addr[0] == 0x33) &&
+ (addr[1] == 0x33))
+ return MLX5E_MC_IPV6;
+
+ return MLX5E_MC_OTHER;
+}
+
+static u32 mlx5e_get_tt_vec(struct mlx5e_eth_addr_info *ai, int type)
+{
+ int eth_addr_type;
+ u32 ret;
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ eth_addr_type = mlx5e_get_eth_addr_type(ai->addr);
+ switch (eth_addr_type) {
+ case MLX5E_UC:
+ ret =
+ (1 << MLX5E_TT_IPV4_TCP) |
+ (1 << MLX5E_TT_IPV6_TCP) |
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+
+ case MLX5E_MC_IPV4:
+ ret =
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ 0;
+ break;
+
+ case MLX5E_MC_IPV6:
+ ret =
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV6) |
+ 0;
+ break;
+
+ case MLX5E_MC_OTHER:
+ ret =
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+ }
+
+ break;
+
+ case MLX5E_ALLMULTI:
+ ret =
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+
+ default: /* MLX5E_PROMISC */
+ ret =
+ (1 << MLX5E_TT_IPV4_TCP) |
+ (1 << MLX5E_TT_IPV6_TCP) |
+ (1 << MLX5E_TT_IPV4_UDP) |
+ (1 << MLX5E_TT_IPV6_UDP) |
+ (1 << MLX5E_TT_IPV4) |
+ (1 << MLX5E_TT_IPV6) |
+ (1 << MLX5E_TT_ANY) |
+ 0;
+ break;
+ }
+
+ return ret;
+}
+
+static int __mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai, int type,
+ void *flow_context, void *match_criteria)
+{
+ u8 match_criteria_enable = 0;
+ void *match_value;
+ void *dest;
+ u8 *dmac;
+ u8 *match_criteria_dmac;
+ void *ft = priv->ft.main;
+ u32 *tirn = priv->tirn;
+ u32 tt_vec;
+ int err;
+
+ match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ dmac = MLX5_ADDR_OF(fte_match_param, match_value,
+ outer_headers.dmac_47_16);
+ match_criteria_dmac = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers.dmac_47_16);
+ dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+ MLX5_SET(dest_format_struct, dest, destination_type,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_TIR);
+
+ switch (type) {
+ case MLX5E_FULLMATCH:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ memset(match_criteria_dmac, 0xff, ETH_ALEN);
+ ether_addr_copy(dmac, ai->addr);
+ break;
+
+ case MLX5E_ALLMULTI:
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ match_criteria_dmac[0] = 0x01;
+ dmac[0] = 0x01;
+ break;
+
+ case MLX5E_PROMISC:
+ break;
+ }
+
+ tt_vec = mlx5e_get_tt_vec(ai, type);
+
+ if (tt_vec & (1 << MLX5E_TT_ANY)) {
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_ANY]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_ANY]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_ANY);
+ }
+
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ethertype);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6);
+ }
+
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.ip_protocol);
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_UDP);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4_UDP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_UDP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4_UDP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4_UDP);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6_UDP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_UDP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6_UDP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6_UDP);
+ }
+
+ MLX5_SET(fte_match_param, match_value, outer_headers.ip_protocol,
+ IPPROTO_TCP);
+
+ if (tt_vec & (1 << MLX5E_TT_IPV4_TCP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IP);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV4_TCP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV4_TCP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV4_TCP);
+ }
+
+ if (tt_vec & (1 << MLX5E_TT_IPV6_TCP)) {
+ MLX5_SET(fte_match_param, match_value, outer_headers.ethertype,
+ ETH_P_IPV6);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ tirn[MLX5E_TT_IPV6_TCP]);
+ err = mlx5_add_flow_table_entry(ft, match_criteria_enable,
+ match_criteria, flow_context,
+ &ai->ft_ix[MLX5E_TT_IPV6_TCP]);
+ if (err) {
+ mlx5e_del_eth_addr_from_flow_table(priv, ai);
+ return err;
+ }
+ ai->tt_vec |= (1 << MLX5E_TT_IPV6_TCP);
+ }
+
+ return 0;
+}
+
+static int mlx5e_add_eth_addr_rule(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_info *ai, int type)
+{
+ u32 *flow_context;
+ u32 *match_criteria;
+ int err;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+ MLX5_ST_SZ_BYTES(dest_format_struct));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!flow_context || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_eth_addr_rule_out;
+ }
+
+ err = __mlx5e_add_eth_addr_rule(priv, ai, type, flow_context,
+ match_criteria);
+ if (err)
+ netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_eth_addr_rule_out:
+ kvfree(match_criteria);
+ kvfree(flow_context);
+ return err;
+}
+
+enum mlx5e_vlan_rule_type {
+ MLX5E_VLAN_RULE_TYPE_UNTAGGED,
+ MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+};
+
+static int mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+ u8 match_criteria_enable = 0;
+ u32 *flow_context;
+ void *match_value;
+ void *dest;
+ u32 *match_criteria;
+ u32 *ft_ix;
+ int err;
+
+ flow_context = mlx5_vzalloc(MLX5_ST_SZ_BYTES(flow_context) +
+ MLX5_ST_SZ_BYTES(dest_format_struct));
+ match_criteria = mlx5_vzalloc(MLX5_ST_SZ_BYTES(fte_match_param));
+ if (!flow_context || !match_criteria) {
+ netdev_err(priv->netdev, "%s: alloc failed\n", __func__);
+ err = -ENOMEM;
+ goto add_vlan_rule_out;
+ }
+ match_value = MLX5_ADDR_OF(flow_context, flow_context, match_value);
+ dest = MLX5_ADDR_OF(flow_context, flow_context, destination);
+
+ MLX5_SET(flow_context, flow_context, action,
+ MLX5_FLOW_CONTEXT_ACTION_FWD_DEST);
+ MLX5_SET(flow_context, flow_context, destination_list_size, 1);
+ MLX5_SET(dest_format_struct, dest, destination_type,
+ MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE);
+ MLX5_SET(dest_format_struct, dest, destination_id,
+ mlx5_get_flow_table_id(priv->ft.main));
+
+ match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.vlan_tag);
+
+ switch (rule_type) {
+ case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ ft_ix = &priv->vlan.untagged_rule_ft_ix;
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+ ft_ix = &priv->vlan.any_vlan_rule_ft_ix;
+ MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+ 1);
+ break;
+ default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
+ ft_ix = &priv->vlan.active_vlans_ft_ix[vid];
+ MLX5_SET(fte_match_param, match_value, outer_headers.vlan_tag,
+ 1);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria,
+ outer_headers.first_vid);
+ MLX5_SET(fte_match_param, match_value, outer_headers.first_vid,
+ vid);
+ break;
+ }
+
+ err = mlx5_add_flow_table_entry(priv->ft.vlan, match_criteria_enable,
+ match_criteria, flow_context, ft_ix);
+ if (err)
+ netdev_err(priv->netdev, "%s: failed\n", __func__);
+
+add_vlan_rule_out:
+ kvfree(match_criteria);
+ kvfree(flow_context);
+ return err;
+}
+
+static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
+ enum mlx5e_vlan_rule_type rule_type, u16 vid)
+{
+ switch (rule_type) {
+ case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.untagged_rule_ft_ix);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_VID:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.any_vlan_rule_ft_ix);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
+ mlx5_del_flow_table_entry(priv->ft.vlan,
+ priv->vlan.active_vlans_ft_ix[vid]);
+ break;
+ }
+}
+
+void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
+{
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ if (priv->vlan.filter_disabled) {
+ priv->vlan.filter_disabled = false;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
+}
+
+void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
+{
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ if (!priv->vlan.filter_disabled) {
+ priv->vlan.filter_disabled = true;
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ }
+}
+
+int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int err = 0;
+
+ mutex_lock(&priv->state_lock);
+
+ set_bit(vid, priv->vlan.active_vlans);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ vid);
+
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto,
+ u16 vid)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ mutex_lock(&priv->state_lock);
+
+ clear_bit(vid, priv->vlan.active_vlans);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+int mlx5e_add_all_vlan_rules(struct mlx5e_priv *priv)
+{
+ u16 vid;
+ int err;
+
+ for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID) {
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID,
+ vid);
+ if (err)
+ return err;
+ }
+
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+ if (err)
+ return err;
+
+ if (priv->vlan.filter_disabled) {
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ 0);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void mlx5e_del_all_vlan_rules(struct mlx5e_priv *priv)
+{
+ u16 vid;
+
+ if (priv->vlan.filter_disabled)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_UNTAGGED, 0);
+
+ for_each_set_bit(vid, priv->vlan.active_vlans, VLAN_N_VID)
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_MATCH_VID, vid);
+}
+
+#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
+ for (i = 0; i < MLX5E_ETH_ADDR_HASH_SIZE; i++) \
+ hlist_for_each_entry_safe(hn, tmp, &hash[i], hlist)
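+
+/* Note: the _safe iterator matters here; mlx5e_execute_action() below may
+ * free the node it is visiting (the MLX5E_ACTION_DEL path).
+ */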
+
+static void mlx5e_execute_action(struct mlx5e_priv *priv,
+ struct mlx5e_eth_addr_hash_node *hn)
+{
+ switch (hn->action) {
+ case MLX5E_ACTION_ADD:
+ mlx5e_add_eth_addr_rule(priv, &hn->ai, MLX5E_FULLMATCH);
+ hn->action = MLX5E_ACTION_NONE;
+ break;
+
+ case MLX5E_ACTION_DEL:
+ mlx5e_del_eth_addr_from_flow_table(priv, &hn->ai);
+ mlx5e_del_eth_addr_from_hash(hn);
+ break;
+ }
+}
+
+static void mlx5e_sync_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct net_device *netdev = priv->netdev;
+ struct netdev_hw_addr *ha;
+
+ netif_addr_lock_bh(netdev);
+
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc,
+ priv->netdev->dev_addr);
+
+ netdev_for_each_uc_addr(ha, netdev)
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_uc, ha->addr);
+
+ netdev_for_each_mc_addr(ha, netdev)
+ mlx5e_add_eth_addr_to_hash(priv->eth_addr.netdev_mc, ha->addr);
+
+ netif_addr_unlock_bh(netdev);
+}
+
+static void mlx5e_apply_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ struct hlist_node *tmp;
+ int i;
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+ mlx5e_execute_action(priv, hn);
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+ mlx5e_execute_action(priv, hn);
+}
+
+static void mlx5e_handle_netdev_addr(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_hash_node *hn;
+ struct hlist_node *tmp;
+ int i;
+
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_uc, i)
+ hn->action = MLX5E_ACTION_DEL;
+ mlx5e_for_each_hash_node(hn, tmp, priv->eth_addr.netdev_mc, i)
+ hn->action = MLX5E_ACTION_DEL;
+
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_sync_netdev_addr(priv);
+
+ mlx5e_apply_netdev_addr(priv);
+}
+
+void mlx5e_set_rx_mode_core(struct mlx5e_priv *priv)
+{
+ struct mlx5e_eth_addr_db *ea = &priv->eth_addr;
+ struct net_device *ndev = priv->netdev;
+
+ bool rx_mode_enable = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ bool promisc_enabled = rx_mode_enable && (ndev->flags & IFF_PROMISC);
+ bool allmulti_enabled = rx_mode_enable && (ndev->flags & IFF_ALLMULTI);
+ bool broadcast_enabled = rx_mode_enable;
+
+ bool enable_promisc = !ea->promisc_enabled && promisc_enabled;
+ bool disable_promisc = ea->promisc_enabled && !promisc_enabled;
+ bool enable_allmulti = !ea->allmulti_enabled && allmulti_enabled;
+ bool disable_allmulti = ea->allmulti_enabled && !allmulti_enabled;
+ bool enable_broadcast = !ea->broadcast_enabled && broadcast_enabled;
+ bool disable_broadcast = ea->broadcast_enabled && !broadcast_enabled;
+
+ if (enable_promisc)
+ mlx5e_add_eth_addr_rule(priv, &ea->promisc, MLX5E_PROMISC);
+ if (enable_allmulti)
+ mlx5e_add_eth_addr_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
+ if (enable_broadcast)
+ mlx5e_add_eth_addr_rule(priv, &ea->broadcast, MLX5E_FULLMATCH);
+
+ mlx5e_handle_netdev_addr(priv);
+
+ if (disable_broadcast)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->broadcast);
+ if (disable_allmulti)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->allmulti);
+ if (disable_promisc)
+ mlx5e_del_eth_addr_from_flow_table(priv, &ea->promisc);
+
+ ea->promisc_enabled = promisc_enabled;
+ ea->allmulti_enabled = allmulti_enabled;
+ ea->broadcast_enabled = broadcast_enabled;
+}
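+
+/* Note: the enable_* / disable_* pairs above are edge detectors - rules
+ * are touched only when the desired state differs from the cached
+ * ea->*_enabled state, making repeated calls with unchanged netdev flags
+ * idempotent.
+ */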
+
+void mlx5e_set_rx_mode_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ set_rx_mode_work);
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_set_rx_mode_core(priv);
+ mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_init_eth_addr(struct mlx5e_priv *priv)
+{
+ ether_addr_copy(priv->eth_addr.broadcast.addr, priv->netdev->broadcast);
+}
+
+static int mlx5e_create_main_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_group *g;
+ u8 *dmac;
+
+ g = kcalloc(9, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+
+ g[0].log_sz = 2;
+ g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[1].log_sz = 1;
+ g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+ outer_headers.ethertype);
+
+ g[2].log_sz = 0;
+
+ g[3].log_sz = 14;
+ g[3].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[3].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+ MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[3].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[4].log_sz = 13;
+ g[4].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[4].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+ MLX5_SET_TO_ONES(fte_match_param, g[4].match_criteria,
+ outer_headers.ethertype);
+
+ g[5].log_sz = 11;
+ g[5].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[5].match_criteria,
+ outer_headers.dmac_47_16);
+ memset(dmac, 0xff, ETH_ALEN);
+
+ g[6].log_sz = 2;
+ g[6].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[6].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+ outer_headers.ethertype);
+ MLX5_SET_TO_ONES(fte_match_param, g[6].match_criteria,
+ outer_headers.ip_protocol);
+
+ g[7].log_sz = 1;
+ g[7].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[7].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ MLX5_SET_TO_ONES(fte_match_param, g[7].match_criteria,
+ outer_headers.ethertype);
+
+ g[8].log_sz = 0;
+ g[8].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ dmac = MLX5_ADDR_OF(fte_match_param, g[8].match_criteria,
+ outer_headers.dmac_47_16);
+ dmac[0] = 0x01;
+ priv->ft.main = mlx5_create_flow_table(priv->mdev, 1,
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+ 9, g);
+ kfree(g);
+
+ return priv->ft.main ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_main_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->ft.main);
+}
+
+static int mlx5e_create_vlan_flow_table(struct mlx5e_priv *priv)
+{
+ struct mlx5_flow_table_group *g;
+
+ g = kcalloc(2, sizeof(*g), GFP_KERNEL);
+ if (!g)
+ return -ENOMEM;
+
+ g[0].log_sz = 12;
+ g[0].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, g[0].match_criteria,
+ outer_headers.first_vid);
+
+ /* untagged + any vlan id */
+ g[1].log_sz = 1;
+ g[1].match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
+ MLX5_SET_TO_ONES(fte_match_param, g[1].match_criteria,
+ outer_headers.vlan_tag);
+
+ priv->ft.vlan = mlx5_create_flow_table(priv->mdev, 0,
+ MLX5_FLOW_TABLE_TYPE_NIC_RCV,
+ 2, g);
+
+ kfree(g);
+ return priv->ft.vlan ? 0 : -ENOMEM;
+}
+
+static void mlx5e_destroy_vlan_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5_destroy_flow_table(priv->ft.vlan);
+}
+
+int mlx5e_open_flow_table(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_create_main_flow_table(priv);
+ if (err)
+ return err;
+
+ err = mlx5e_create_vlan_flow_table(priv);
+ if (err)
+ goto err_destroy_main_flow_table;
+
+ return 0;
+
+err_destroy_main_flow_table:
+ mlx5e_destroy_main_flow_table(priv);
+
+ return err;
+}
+
+void mlx5e_close_flow_table(struct mlx5e_priv *priv)
+{
+ mlx5e_destroy_vlan_flow_table(priv);
+ mlx5e_destroy_main_flow_table(priv);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
new file mode 100644
index 0000000..eee829d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -0,0 +1,1899 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/flow_table.h>
+#include "en.h"
+
+struct mlx5e_rq_param {
+ u32 rqc[MLX5_ST_SZ_DW(rqc)];
+ struct mlx5_wq_param wq;
+};
+
+struct mlx5e_sq_param {
+ u32 sqc[MLX5_ST_SZ_DW(sqc)];
+ struct mlx5_wq_param wq;
+};
+
+struct mlx5e_cq_param {
+ u32 cqc[MLX5_ST_SZ_DW(cqc)];
+ struct mlx5_wq_param wq;
+ u16 eq_ix;
+};
+
+struct mlx5e_channel_param {
+ struct mlx5e_rq_param rq;
+ struct mlx5e_sq_param sq;
+ struct mlx5e_cq_param rx_cq;
+ struct mlx5e_cq_param tx_cq;
+};
+
+static void mlx5e_update_carrier(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u8 port_state;
+
+ port_state = mlx5_query_vport_state(mdev,
+ MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT);
+
+ if (port_state == VPORT_STATE_UP)
+ netif_carrier_on(priv->netdev);
+ else
+ netif_carrier_off(priv->netdev);
+}
+
+static void mlx5e_update_carrier_work(struct work_struct *work)
+{
+ struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
+ update_carrier_work);
+
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_update_carrier(priv);
+ mutex_unlock(&priv->state_lock);
+}
+
+void mlx5e_update_stats(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5e_vport_stats *s = &priv->stats.vport;
+ struct mlx5e_rq_stats *rq_stats;
+ struct mlx5e_sq_stats *sq_stats;
+ u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
+ u64 tx_offload_none;
+ int i, j;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return;
+
+ /* Collect the SW counters first, then the HW counters, for consistency */
+ s->tso_packets = 0;
+ s->tso_bytes = 0;
+ s->tx_queue_stopped = 0;
+ s->tx_queue_wake = 0;
+ s->tx_queue_dropped = 0;
+ tx_offload_none = 0;
+ s->lro_packets = 0;
+ s->lro_bytes = 0;
+ s->rx_csum_none = 0;
+ s->rx_wqe_err = 0;
+ for (i = 0; i < priv->params.num_channels; i++) {
+ rq_stats = &priv->channel[i]->rq.stats;
+
+ s->lro_packets += rq_stats->lro_packets;
+ s->lro_bytes += rq_stats->lro_bytes;
+ s->rx_csum_none += rq_stats->csum_none;
+ s->rx_wqe_err += rq_stats->wqe_err;
+
+ for (j = 0; j < priv->num_tc; j++) {
+ sq_stats = &priv->channel[i]->sq[j].stats;
+
+ s->tso_packets += sq_stats->tso_packets;
+ s->tso_bytes += sq_stats->tso_bytes;
+ s->tx_queue_stopped += sq_stats->stopped;
+ s->tx_queue_wake += sq_stats->wake;
+ s->tx_queue_dropped += sq_stats->dropped;
+ tx_offload_none += sq_stats->csum_offload_none;
+ }
+ }
+
+ /* HW counters */
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_vport_counter_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_COUNTER);
+ MLX5_SET(query_vport_counter_in, in, op_mod, 0);
+ MLX5_SET(query_vport_counter_in, in, other_vport, 0);
+
+ memset(out, 0, outlen);
+
+ if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
+ goto free_out;
+
+#define MLX5_GET_CTR(p, x) \
+ MLX5_GET64(query_vport_counter_out, p, x)
+
+ s->rx_error_packets =
+ MLX5_GET_CTR(out, received_errors.packets);
+ s->rx_error_bytes =
+ MLX5_GET_CTR(out, received_errors.octets);
+ s->tx_error_packets =
+ MLX5_GET_CTR(out, transmit_errors.packets);
+ s->tx_error_bytes =
+ MLX5_GET_CTR(out, transmit_errors.octets);
+
+ s->rx_unicast_packets =
+ MLX5_GET_CTR(out, received_eth_unicast.packets);
+ s->rx_unicast_bytes =
+ MLX5_GET_CTR(out, received_eth_unicast.octets);
+ s->tx_unicast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
+ s->tx_unicast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
+
+ s->rx_multicast_packets =
+ MLX5_GET_CTR(out, received_eth_multicast.packets);
+ s->rx_multicast_bytes =
+ MLX5_GET_CTR(out, received_eth_multicast.octets);
+ s->tx_multicast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
+ s->tx_multicast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
+
+ s->rx_broadcast_packets =
+ MLX5_GET_CTR(out, received_eth_broadcast.packets);
+ s->rx_broadcast_bytes =
+ MLX5_GET_CTR(out, received_eth_broadcast.octets);
+ s->tx_broadcast_packets =
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
+ s->tx_broadcast_bytes =
+ MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
+
+ s->rx_packets =
+ s->rx_unicast_packets +
+ s->rx_multicast_packets +
+ s->rx_broadcast_packets;
+ s->rx_bytes =
+ s->rx_unicast_bytes +
+ s->rx_multicast_bytes +
+ s->rx_broadcast_bytes;
+ s->tx_packets =
+ s->tx_unicast_packets +
+ s->tx_multicast_packets +
+ s->tx_broadcast_packets;
+ s->tx_bytes =
+ s->tx_unicast_bytes +
+ s->tx_multicast_bytes +
+ s->tx_broadcast_bytes;
+
+ /* Update calculated offload counters */
+ s->tx_csum_offload = s->tx_packets - tx_offload_none;
+ s->rx_csum_good = s->rx_packets - s->rx_csum_none;
+
+free_out:
+ kvfree(out);
+}
+
+static void mlx5e_update_stats_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
+ update_stats_work);
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
+ mlx5e_update_stats(priv);
+ schedule_delayed_work(dwork,
+ msecs_to_jiffies(
+ MLX5E_UPDATE_STATS_INTERVAL));
+ }
+ mutex_unlock(&priv->state_lock);
+}
+
+static void __mlx5e_async_event(struct mlx5e_priv *priv,
+ enum mlx5_dev_event event)
+{
+ switch (event) {
+ case MLX5_DEV_EVENT_PORT_UP:
+ case MLX5_DEV_EVENT_PORT_DOWN:
+ schedule_work(&priv->update_carrier_work);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
+ enum mlx5_dev_event event, unsigned long param)
+{
+ struct mlx5e_priv *priv = vpriv;
+
+ spin_lock(&priv->async_events_spinlock);
+ if (test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
+ __mlx5e_async_event(priv, event);
+ spin_unlock(&priv->async_events_spinlock);
+}
+
+static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
+{
+ set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+}
+
+static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
+{
+ spin_lock_irq(&priv->async_events_spinlock);
+ clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
+ spin_unlock_irq(&priv->async_events_spinlock);
+}
+
+static void mlx5e_send_nop(struct mlx5e_sq *sq)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+
+ memset(cseg, 0, sizeof(*cseg));
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_NOP);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | 0x01);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ sq->skb[pi] = NULL;
+ sq->pc++;
+ mlx5e_tx_notify_hw(sq, wqe);
+}
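+
+/* Illustrative: the NOP above takes one SQ slot (qpn_ds low bits = 1 DS)
+ * and requests a CQE via MLX5_WQE_CTRL_CQ_UPDATE, so posting it forces a
+ * completion that schedules the channel's NAPI poll; see its use in
+ * mlx5e_open_rq() below.
+ */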
+
+static int mlx5e_create_rq(struct mlx5e_channel *c,
+ struct mlx5e_rq_param *param,
+ struct mlx5e_rq *rq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ void *rqc = param->rqc;
+ void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
+ int wq_sz;
+ int err;
+ int i;
+
+ err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
+ &rq->wq_ctrl);
+ if (err)
+ return err;
+
+ rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
+
+ wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+ rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
+ cpu_to_node(c->cpu));
+ if (!rq->skb) {
+ err = -ENOMEM;
+ goto err_rq_wq_destroy;
+ }
+
+ rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz :
+ priv->netdev->mtu + ETH_HLEN + VLAN_HLEN;
+
+ for (i = 0; i < wq_sz; i++) {
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
+
+ wqe->data.lkey = c->mkey_be;
+ wqe->data.byte_count = cpu_to_be32(rq->wqe_sz);
+ }
+
+ rq->pdev = c->pdev;
+ rq->netdev = c->netdev;
+ rq->channel = c;
+ rq->ix = c->ix;
+
+ return 0;
+
+err_rq_wq_destroy:
+ mlx5_wq_destroy(&rq->wq_ctrl);
+
+ return err;
+}
+
+static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
+{
+ kfree(rq->skb);
+ mlx5_wq_destroy(&rq->wq_ctrl);
+}
+
+static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
+ sizeof(u64) * rq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
+ wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ memcpy(rqc, param->rqc, sizeof(param->rqc));
+
+ MLX5_SET(rqc, rqc, cqn, c->rq.cq.mcq.cqn);
+ MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
+ MLX5_SET(rqc, rqc, flush_in_error_en, 1);
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_array(&rq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_create_rq(mdev, in, inlen, &rq->rqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *rqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
+
+ MLX5_SET(modify_rq_in, in, rq_state, curr_state);
+ MLX5_SET(rqc, rqc, state, next_state);
+
+ err = mlx5_modify_rq(mdev, rq->rqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_disable_rq(struct mlx5e_rq *rq)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_destroy_rq(mdev, rq->rqn);
+}
+
+static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
+{
+ struct mlx5e_channel *c = rq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_wq_ll *wq = &rq->wq;
+ int i;
+
+ for (i = 0; i < 1000; i++) {
+ if (wq->cur_sz >= priv->params.min_rx_wqes)
+ return 0;
+
+ msleep(20);
+ }
+
+ return -ETIMEDOUT;
+}
+
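+/* RQ lifecycle: create the software resources, create the firmware object,
+ * move it RST->RDY, then enable WQE posting and send a NOP on the first SQ
+ * so the resulting completion kicks NAPI into posting RX WQEs.
+ */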
+static int mlx5e_open_rq(struct mlx5e_channel *c,
+ struct mlx5e_rq_param *param,
+ struct mlx5e_rq *rq)
+{
+ int err;
+
+ err = mlx5e_create_rq(c, param, rq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_rq(rq, param);
+ if (err)
+ goto err_destroy_rq;
+
+ err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
+ if (err)
+ goto err_disable_rq;
+
+ set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ mlx5e_send_nop(&c->sq[0]); /* trigger mlx5e_post_rx_wqes() */
+
+ return 0;
+
+err_disable_rq:
+ mlx5e_disable_rq(rq);
+err_destroy_rq:
+ mlx5e_destroy_rq(rq);
+
+ return err;
+}
+
+static void mlx5e_close_rq(struct mlx5e_rq *rq)
+{
+ clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
+ napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
+
+ mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
+ while (!mlx5_wq_ll_is_empty(&rq->wq))
+ msleep(20);
+
+ /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
+ napi_synchronize(&rq->channel->napi);
+
+ mlx5e_disable_rq(rq);
+ mlx5e_destroy_rq(rq);
+}
+
+static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
+{
+ kfree(sq->dma_fifo);
+ kfree(sq->skb);
+}
+
+static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
+{
+ int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
+ int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
+
+ sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
+ sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
+ numa);
+
+ if (!sq->skb || !sq->dma_fifo) {
+ mlx5e_free_sq_db(sq);
+ return -ENOMEM;
+ }
+
+ sq->dma_fifo_mask = df_sz - 1;
+
+ return 0;
+}
+
+static int mlx5e_create_sq(struct mlx5e_channel *c,
+ int tc,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_sq *sq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *sqc = param->sqc;
+ void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
+ int err;
+
+ err = mlx5_alloc_map_uar(mdev, &sq->uar);
+ if (err)
+ return err;
+
+ err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
+ &sq->wq_ctrl);
+ if (err)
+ goto err_unmap_free_uar;
+
+ sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
+ sq->uar_map = sq->uar.map;
+ sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
+
+ err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
+ if (err)
+ goto err_sq_wq_destroy;
+
+ sq->txq = netdev_get_tx_queue(priv->netdev,
+ c->ix + tc * priv->params.num_channels);
+
+ sq->pdev = c->pdev;
+ sq->mkey_be = c->mkey_be;
+ sq->channel = c;
+ sq->tc = tc;
+
+ return 0;
+
+err_sq_wq_destroy:
+ mlx5_wq_destroy(&sq->wq_ctrl);
+
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &sq->uar);
+
+ return err;
+}
+
+static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+
+ mlx5e_free_sq_db(sq);
+ mlx5_wq_destroy(&sq->wq_ctrl);
+ mlx5_unmap_free_uar(priv->mdev, &sq->uar);
+}
+
+static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *sqc;
+ void *wq;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
+ sizeof(u64) * sq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
+ wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ memcpy(sqc, param->sqc, sizeof(param->sqc));
+
+ MLX5_SET(sqc, sqc, user_index, sq->tc);
+ MLX5_SET(sqc, sqc, tis_num_0, priv->tisn[sq->tc]);
+ MLX5_SET(sqc, sqc, cqn, c->sq[sq->tc].cq.mcq.cqn);
+ MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
+ MLX5_SET(sqc, sqc, tis_lst_sz, 1);
+ MLX5_SET(sqc, sqc, flush_in_error_en, 1);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
+ MLX5_SET(wq, wq, uar_page, sq->uar.index);
+ MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
+
+ mlx5_fill_page_array(&sq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
+
+ err = mlx5_create_sq(mdev, in, inlen, &sq->sqn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ void *in;
+ void *sqc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
+
+ MLX5_SET(modify_sq_in, in, sq_state, curr_state);
+ MLX5_SET(sqc, sqc, state, next_state);
+
+ err = mlx5_modify_sq(mdev, sq->sqn, in, inlen);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_disable_sq(struct mlx5e_sq *sq)
+{
+ struct mlx5e_channel *c = sq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_destroy_sq(mdev, sq->sqn);
+}
+
+static int mlx5e_open_sq(struct mlx5e_channel *c,
+ int tc,
+ struct mlx5e_sq_param *param,
+ struct mlx5e_sq *sq)
+{
+ int err;
+
+ err = mlx5e_create_sq(c, tc, param, sq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_sq(sq, param);
+ if (err)
+ goto err_destroy_sq;
+
+ err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
+ if (err)
+ goto err_disable_sq;
+
+ set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ netdev_tx_reset_queue(sq->txq);
+ netif_tx_start_queue(sq->txq);
+
+ return 0;
+
+err_disable_sq:
+ mlx5e_disable_sq(sq);
+err_destroy_sq:
+ mlx5e_destroy_sq(sq);
+
+ return err;
+}
+
+static inline void netif_tx_disable_queue(struct netdev_queue *txq)
+{
+ __netif_tx_lock_bh(txq);
+ netif_tx_stop_queue(txq);
+ __netif_tx_unlock_bh(txq);
+}
+
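+/* Drain and tear down an SQ: stop the stack from queueing, flush any
+ * pending WQEs to hardware with a final NOP, move the SQ to ERR, wait
+ * until the completion counter catches up with the producer counter, and
+ * only then destroy the queue.
+ */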
+static void mlx5e_close_sq(struct mlx5e_sq *sq)
+{
+ clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
+ napi_synchronize(&sq->channel->napi); /* prevent netif_tx_wake_queue */
+ netif_tx_disable_queue(sq->txq);
+
+ /* ensure hw is notified of all pending wqes */
+ if (mlx5e_sq_has_room_for(sq, 1))
+ mlx5e_send_nop(sq);
+
+ mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
+ while (sq->cc != sq->pc) /* wait till sq is empty */
+ msleep(20);
+
+ /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
+ napi_synchronize(&sq->channel->napi);
+
+ mlx5e_disable_sq(sq);
+ mlx5e_destroy_sq(sq);
+}
+
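+/* Allocate a CQ buffer and initialize each CQE's op_own to 0xf1 (invalid
+ * opcode, hardware ownership), so stale entries are never mistaken for
+ * valid completions on the first poll.
+ */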
+static int mlx5e_create_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq)
+{
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+ int eqn_not_used;
+ int irqn;
+ int err;
+ u32 i;
+
+ param->wq.numa = cpu_to_node(c->cpu);
+ param->eq_ix = c->ix;
+
+ err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
+ &cq->wq_ctrl);
+ if (err)
+ return err;
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
+
+ cq->napi = &c->napi;
+
+ mcq->cqe_sz = 64;
+ mcq->set_ci_db = cq->wq_ctrl.db.db;
+ mcq->arm_db = cq->wq_ctrl.db.db + 1;
+ *mcq->set_ci_db = 0;
+ *mcq->arm_db = 0;
+ mcq->vector = param->eq_ix;
+ mcq->comp = mlx5e_completion_event;
+ mcq->event = mlx5e_cq_error_event;
+ mcq->irqn = irqn;
+ mcq->uar = &priv->cq_uar;
+
+ for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
+
+ cqe->op_own = 0xf1;
+ }
+
+ cq->channel = c;
+
+ return 0;
+}
+
+static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
+{
+ mlx5_wq_destroy(&cq->wq_ctrl);
+}
+
+static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
+{
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_core_cq *mcq = &cq->mcq;
+
+ void *in;
+ void *cqc;
+ int inlen;
+ int irqn_not_used;
+ int eqn;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
+ sizeof(u64) * cq->wq_ctrl.buf.npages;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
+
+ memcpy(cqc, param->cqc, sizeof(param->cqc));
+
+ mlx5_fill_page_array(&cq->wq_ctrl.buf,
+ (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
+
+ mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);
+
+ MLX5_SET(cqc, cqc, c_eqn, eqn);
+ MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
+ MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
+ PAGE_SHIFT);
+ MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);
+
+ err = mlx5_core_create_cq(mdev, mcq, in, inlen);
+
+ kvfree(in);
+
+ if (err)
+ return err;
+
+ mlx5e_cq_arm(cq);
+
+ return 0;
+}
+
+static void mlx5e_disable_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ mlx5_core_destroy_cq(mdev, &cq->mcq);
+}
+
+static int mlx5e_open_cq(struct mlx5e_channel *c,
+ struct mlx5e_cq_param *param,
+ struct mlx5e_cq *cq,
+ u16 moderation_usecs,
+ u16 moderation_frames)
+{
+ int err;
+ struct mlx5e_priv *priv = c->priv;
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ err = mlx5e_create_cq(c, param, cq);
+ if (err)
+ return err;
+
+ err = mlx5e_enable_cq(cq, param);
+ if (err)
+ goto err_destroy_cq;
+
+ err = mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
+ moderation_usecs,
+ moderation_frames);
+ if (err)
+ goto err_destroy_cq;
+
+ return 0;
+
+err_destroy_cq:
+ mlx5e_destroy_cq(cq);
+
+ return err;
+}
+
+static void mlx5e_close_cq(struct mlx5e_cq *cq)
+{
+ mlx5e_disable_cq(cq);
+ mlx5e_destroy_cq(cq);
+}
+
+static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
+{
+ return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
+}
+
+static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
+ struct mlx5e_channel_param *cparam)
+{
+ struct mlx5e_priv *priv = c->priv;
+ int err;
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
+ priv->params.tx_cq_moderation_usec,
+ priv->params.tx_cq_moderation_pkts);
+ if (err)
+ goto err_close_tx_cqs;
+
+ c->sq[tc].cq.sqrq = &c->sq[tc];
+ }
+
+ return 0;
+
+err_close_tx_cqs:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_cq(&c->sq[tc].cq);
+
+ return err;
+}
+
+static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_cq(&c->sq[tc].cq);
+}
+
+static int mlx5e_open_sqs(struct mlx5e_channel *c,
+ struct mlx5e_channel_param *cparam)
+{
+ int err;
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++) {
+ err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
+ if (err)
+ goto err_close_sqs;
+ }
+
+ return 0;
+
+err_close_sqs:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_sq(&c->sq[tc]);
+
+ return err;
+}
+
+static void mlx5e_close_sqs(struct mlx5e_channel *c)
+{
+ int tc;
+
+ for (tc = 0; tc < c->num_tc; tc++)
+ mlx5e_close_sq(&c->sq[tc]);
+}
+
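+/* Per-channel bring-up, in dependency order: TX CQs, RX CQ, NAPI, SQs,
+ * then the RQ. XPS is pointed at the channel's CPU so the stack prefers
+ * the local TX queue. Errors unwind in reverse.
+ */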
+static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
+ struct mlx5e_channel_param *cparam,
+ struct mlx5e_channel **cp)
+{
+ struct net_device *netdev = priv->netdev;
+ int cpu = mlx5e_get_cpu(priv, ix);
+ struct mlx5e_channel *c;
+ int err;
+
+ c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
+ if (!c)
+ return -ENOMEM;
+
+ c->priv = priv;
+ c->ix = ix;
+ c->cpu = cpu;
+ c->pdev = &priv->mdev->pdev->dev;
+ c->netdev = priv->netdev;
+ c->mkey_be = cpu_to_be32(priv->mr.key);
+ c->num_tc = priv->num_tc;
+
+ netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
+
+ err = mlx5e_open_tx_cqs(c, cparam);
+ if (err)
+ goto err_napi_del;
+
+ err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
+ priv->params.rx_cq_moderation_usec,
+ priv->params.rx_cq_moderation_pkts);
+ if (err)
+ goto err_close_tx_cqs;
+ c->rq.cq.sqrq = &c->rq;
+
+ napi_enable(&c->napi);
+
+ err = mlx5e_open_sqs(c, cparam);
+ if (err)
+ goto err_disable_napi;
+
+ err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
+ if (err)
+ goto err_close_sqs;
+
+ netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
+ *cp = c;
+
+ return 0;
+
+err_close_sqs:
+ mlx5e_close_sqs(c);
+
+err_disable_napi:
+ napi_disable(&c->napi);
+ mlx5e_close_cq(&c->rq.cq);
+
+err_close_tx_cqs:
+ mlx5e_close_tx_cqs(c);
+
+err_napi_del:
+ netif_napi_del(&c->napi);
+ kfree(c);
+
+ return err;
+}
+
+static void mlx5e_close_channel(struct mlx5e_channel *c)
+{
+ mlx5e_close_rq(&c->rq);
+ mlx5e_close_sqs(c);
+ napi_disable(&c->napi);
+ mlx5e_close_cq(&c->rq.cq);
+ mlx5e_close_tx_cqs(c);
+ netif_napi_del(&c->napi);
+ kfree(c);
+}
+
+static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
+ struct mlx5e_rq_param *param)
+{
+ void *rqc = param->rqc;
+ void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
+
+ MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
+ MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
+ MLX5_SET(wq, wq, pd, priv->pdn);
+
+ param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+ param->wq.linear = 1;
+}
+
+static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
+ struct mlx5e_sq_param *param)
+{
+ void *sqc = param->sqc;
+ void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
+
+ MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
+ MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
+ MLX5_SET(wq, wq, pd, priv->pdn);
+
+ param->wq.numa = dev_to_node(&priv->mdev->pdev->dev);
+}
+
+static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
+}
+
+static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_rq_size);
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
+ struct mlx5e_cq_param *param)
+{
+ void *cqc = param->cqc;
+
+ MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
+
+ mlx5e_build_common_cq_param(priv, param);
+}
+
+static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
+ struct mlx5e_channel_param *cparam)
+{
+ memset(cparam, 0, sizeof(*cparam));
+
+ mlx5e_build_rq_param(priv, &cparam->rq);
+ mlx5e_build_sq_param(priv, &cparam->sq);
+ mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
+ mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
+}
+
+static int mlx5e_open_channels(struct mlx5e_priv *priv)
+{
+ struct mlx5e_channel_param cparam;
+ int err;
+ int i;
+ int j;
+
+ priv->channel = kcalloc(priv->params.num_channels,
+ sizeof(struct mlx5e_channel *), GFP_KERNEL);
+ if (!priv->channel)
+ return -ENOMEM;
+
+ mlx5e_build_channel_param(priv, &cparam);
+ for (i = 0; i < priv->params.num_channels; i++) {
+ err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
+ if (err)
+ goto err_close_channels;
+ }
+
+ for (j = 0; j < priv->params.num_channels; j++) {
+ err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
+ if (err)
+ goto err_close_channels;
+ }
+
+ return 0;
+
+err_close_channels:
+ for (i--; i >= 0; i--)
+ mlx5e_close_channel(priv->channel[i]);
+
+ kfree(priv->channel);
+
+ return err;
+}
+
+static void mlx5e_close_channels(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->params.num_channels; i++)
+ mlx5e_close_channel(priv->channel[i]);
+
+ kfree(priv->channel);
+}
+
+static int mlx5e_open_tis(struct mlx5e_priv *priv, int tc)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 in[MLX5_ST_SZ_DW(create_tis_in)];
+ void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(tisc, tisc, prio, tc);
+
+ return mlx5_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
+}
+
+static void mlx5e_close_tis(struct mlx5e_priv *priv, int tc)
+{
+ mlx5_destroy_tis(priv->mdev, priv->tisn[tc]);
+}
+
+static int mlx5e_open_tises(struct mlx5e_priv *priv)
+{
+ int num_tc = priv->num_tc;
+ int err;
+ int tc;
+
+ for (tc = 0; tc < num_tc; tc++) {
+ err = mlx5e_open_tis(priv, tc);
+ if (err)
+ goto err_close_tises;
+ }
+
+ return 0;
+
+err_close_tises:
+ for (tc--; tc >= 0; tc--)
+ mlx5e_close_tis(priv, tc);
+
+ return err;
+}
+
+static void mlx5e_close_tises(struct mlx5e_priv *priv)
+{
+ int num_tc = priv->num_tc;
+ int tc;
+
+ for (tc = 0; tc < num_tc; tc++)
+ mlx5e_close_tis(priv, tc);
+}
+
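+/* Create the RQ table used for RSS indirection: 2^rx_hash_log_tbl_sz
+ * entries, spread round-robin over the channels' RQs.
+ */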
+static int mlx5e_open_rqt(struct mlx5e_priv *priv)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ u32 out[MLX5_ST_SZ_DW(create_rqt_out)];
+ void *rqtc;
+ int inlen;
+ int err;
+ int sz;
+ int i;
+
+ sz = 1 << priv->params.rx_hash_log_tbl_sz;
+
+ inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
+
+ MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
+ MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
+
+ for (i = 0; i < sz; i++) {
+ int ix = i % priv->params.num_channels;
+
+ MLX5_SET(rqtc, rqtc, rq_num[i], priv->channel[ix]->rq.rqn);
+ }
+
+ MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(mdev, in, inlen, out, sizeof(out));
+ if (!err)
+ priv->rqtn = MLX5_GET(create_rqt_out, out, rqtn);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_close_rqt(struct mlx5e_priv *priv)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rqt_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
+ MLX5_SET(destroy_rqt_in, in, rqtn, priv->rqtn);
+
+ mlx5_cmd_exec_check_status(priv->mdev, in, sizeof(in), out,
+ sizeof(out));
+}
+
+static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
+{
+ void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
+
+#define ROUGH_MAX_L2_L3_HDR_SZ 256
+
+#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP)
+
+#define MLX5_HASH_ALL (MLX5_HASH_FIELD_SEL_SRC_IP |\
+ MLX5_HASH_FIELD_SEL_DST_IP |\
+ MLX5_HASH_FIELD_SEL_L4_SPORT |\
+ MLX5_HASH_FIELD_SEL_L4_DPORT)
+
+ if (priv->params.lro_en) {
+ MLX5_SET(tirc, tirc, lro_enable_mask,
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
+ MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
+ MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
+ (priv->params.lro_wqe_sz -
+ ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
+ MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
+ MLX5_CAP_ETH(priv->mdev,
+ lro_timer_supported_periods[3]));
+ }
+
+ switch (tt) {
+ case MLX5E_TT_ANY:
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_DIRECT);
+ MLX5_SET(tirc, tirc, inline_rqn,
+ priv->channel[0]->rq.rqn);
+ break;
+ default:
+ MLX5_SET(tirc, tirc, disp_type,
+ MLX5_TIRC_DISP_TYPE_INDIRECT);
+ MLX5_SET(tirc, tirc, indirect_table,
+ priv->rqtn);
+ MLX5_SET(tirc, tirc, rx_hash_fn,
+ MLX5_TIRC_RX_HASH_FN_HASH_TOEPLITZ);
+ MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
+ netdev_rss_key_fill(MLX5_ADDR_OF(tirc, tirc,
+ rx_hash_toeplitz_key),
+ MLX5_FLD_SZ_BYTES(tirc,
+ rx_hash_toeplitz_key));
+ break;
+ }
+
+ switch (tt) {
+ case MLX5E_TT_IPV4_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV6_TCP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_TCP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV4_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV6_UDP:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
+ MLX5_L4_PROT_TYPE_UDP);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_ALL);
+ break;
+
+ case MLX5E_TT_IPV4:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV4);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+
+ case MLX5E_TT_IPV6:
+ MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
+ MLX5_L3_PROT_TYPE_IPV6);
+ MLX5_SET(rx_hash_field_select, hfso, selected_fields,
+ MLX5_HASH_IP);
+ break;
+ }
+}
+
+static int mlx5e_open_tir(struct mlx5e_priv *priv, int tt)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u32 *in;
+ void *tirc;
+ int inlen;
+ int err;
+
+ inlen = MLX5_ST_SZ_BYTES(create_tir_in);
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
+
+ mlx5e_build_tir_ctx(priv, tirc, tt);
+
+ err = mlx5_create_tir(mdev, in, inlen, &priv->tirn[tt]);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5e_close_tir(struct mlx5e_priv *priv, int tt)
+{
+ mlx5_destroy_tir(priv->mdev, priv->tirn[tt]);
+}
+
+static int mlx5e_open_tirs(struct mlx5e_priv *priv)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++) {
+ err = mlx5e_open_tir(priv, i);
+ if (err)
+ goto err_close_tirs;
+ }
+
+ return 0;
+
+err_close_tirs:
+ for (i--; i >= 0; i--)
+ mlx5e_close_tir(priv, i);
+
+ return err;
+}
+
+static void mlx5e_close_tirs(struct mlx5e_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < MLX5E_NUM_TT; i++)
+ mlx5e_close_tir(priv, i);
+}
+
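+/* Full device open, called with state_lock held: program the port MTU,
+ * then bring up TISes, channels, the RQT, TIRs, the flow table and the
+ * VLAN rules, in that order; each error path unwinds everything opened
+ * before it.
+ */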
+int mlx5e_open_locked(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int actual_mtu;
+ int num_txqs;
+ int err;
+
+ num_txqs = roundup_pow_of_two(priv->params.num_channels) *
+ priv->params.num_tc;
+ netif_set_real_num_tx_queues(netdev, num_txqs);
+ netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
+
+ err = mlx5_set_port_mtu(mdev, netdev->mtu);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_set_port_mtu failed %d\n",
+ __func__, err);
+ return err;
+ }
+
+ err = mlx5_query_port_oper_mtu(mdev, &actual_mtu);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_query_port_oper_mtu failed %d\n",
+ __func__, err);
+ return err;
+ }
+
+ if (actual_mtu != netdev->mtu)
+ netdev_warn(netdev, "%s: Failed to set MTU to %d, port MTU is %d\n",
+ __func__, netdev->mtu, actual_mtu);
+
+ netdev->mtu = actual_mtu;
+
+ err = mlx5e_open_tises(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_tises failed, %d\n",
+ __func__, err);
+ return err;
+ }
+
+ err = mlx5e_open_channels(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
+ __func__, err);
+ goto err_close_tises;
+ }
+
+ err = mlx5e_open_rqt(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_rqt failed, %d\n",
+ __func__, err);
+ goto err_close_channels;
+ }
+
+ err = mlx5e_open_tirs(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_tirs failed, %d\n",
+ __func__, err);
+ goto err_close_rqt;
+ }
+
+ err = mlx5e_open_flow_table(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_open_flow_table failed, %d\n",
+ __func__, err);
+ goto err_close_tirs;
+ }
+
+ err = mlx5e_add_all_vlan_rules(priv);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_add_all_vlan_rules failed, %d\n",
+ __func__, err);
+ goto err_close_flow_table;
+ }
+
+ mlx5e_init_eth_addr(priv);
+
+ set_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ mlx5e_update_carrier(priv);
+ mlx5e_set_rx_mode_core(priv);
+
+ schedule_delayed_work(&priv->update_stats_work, 0);
+ return 0;
+
+err_close_flow_table:
+ mlx5e_close_flow_table(priv);
+
+err_close_tirs:
+ mlx5e_close_tirs(priv);
+
+err_close_rqt:
+ mlx5e_close_rqt(priv);
+
+err_close_channels:
+ mlx5e_close_channels(priv);
+
+err_close_tises:
+ mlx5e_close_tises(priv);
+
+ return err;
+}
+
+static int mlx5e_open(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_open_locked(netdev);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_close_locked(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ clear_bit(MLX5E_STATE_OPENED, &priv->state);
+
+ mlx5e_set_rx_mode_core(priv);
+ mlx5e_del_all_vlan_rules(priv);
+ netif_carrier_off(priv->netdev);
+ mlx5e_close_flow_table(priv);
+ mlx5e_close_tirs(priv);
+ mlx5e_close_rqt(priv);
+ mlx5e_close_channels(priv);
+ mlx5e_close_tises(priv);
+
+ return 0;
+}
+
+static int mlx5e_close(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ int err;
+
+ mutex_lock(&priv->state_lock);
+ err = mlx5e_close_locked(netdev);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+int mlx5e_update_priv_params(struct mlx5e_priv *priv,
+ struct mlx5e_params *new_params)
+{
+ int err = 0;
+ int was_opened;
+
+ WARN_ON(!mutex_is_locked(&priv->state_lock));
+
+ was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
+ if (was_opened)
+ mlx5e_close_locked(priv->netdev);
+
+ priv->params = *new_params;
+
+ if (was_opened)
+ err = mlx5e_open_locked(priv->netdev);
+
+ return err;
+}
+
+static struct rtnl_link_stats64 *
+mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ struct mlx5e_vport_stats *vstats = &priv->stats.vport;
+
+ stats->rx_packets = vstats->rx_packets;
+ stats->rx_bytes = vstats->rx_bytes;
+ stats->tx_packets = vstats->tx_packets;
+ stats->tx_bytes = vstats->tx_bytes;
+ stats->multicast = vstats->rx_multicast_packets +
+ vstats->tx_multicast_packets;
+ stats->tx_errors = vstats->tx_error_packets;
+ stats->rx_errors = vstats->rx_error_packets;
+ stats->tx_dropped = vstats->tx_queue_dropped;
+ stats->rx_crc_errors = 0;
+ stats->rx_length_errors = 0;
+
+ return stats;
+}
+
+static void mlx5e_set_rx_mode(struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+
+ schedule_work(&priv->set_rx_mode_work);
+}
+
+static int mlx5e_set_mac(struct net_device *netdev, void *addr)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct sockaddr *saddr = addr;
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ netif_addr_lock_bh(netdev);
+ ether_addr_copy(netdev->dev_addr, saddr->sa_data);
+ netif_addr_unlock_bh(netdev);
+
+ schedule_work(&priv->set_rx_mode_work);
+
+ return 0;
+}
+
+static int mlx5e_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ netdev_features_t changes = features ^ netdev->features;
+ struct mlx5e_params new_params;
+ bool update_params = false;
+
+ mutex_lock(&priv->state_lock);
+ new_params = priv->params;
+
+ if (changes & NETIF_F_LRO) {
+ new_params.lro_en = !!(features & NETIF_F_LRO);
+ update_params = true;
+ }
+
+ if (update_params)
+ mlx5e_update_priv_params(priv, &new_params);
+
+ if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
+ mlx5e_enable_vlan_filter(priv);
+ else
+ mlx5e_disable_vlan_filter(priv);
+ }
+
+ mutex_unlock(&priv->state_lock);
+
+ return 0;
+}
+
+static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int max_mtu;
+ int err = 0;
+
+ err = mlx5_query_port_max_mtu(mdev, &max_mtu);
+ if (err)
+ return err;
+
+ if (new_mtu > max_mtu || new_mtu < MLX5E_PARAMS_MIN_MTU) {
+ netdev_err(netdev, "%s: Bad MTU size, mtu must be [%d-%d]\n",
+ __func__, MLX5E_PARAMS_MIN_MTU, max_mtu);
+ return -EINVAL;
+ }
+
+ mutex_lock(&priv->state_lock);
+ netdev->mtu = new_mtu;
+ err = mlx5e_update_priv_params(priv, &priv->params);
+ mutex_unlock(&priv->state_lock);
+
+ return err;
+}
+
+static struct net_device_ops mlx5e_netdev_ops = {
+ .ndo_open = mlx5e_open,
+ .ndo_stop = mlx5e_close,
+ .ndo_start_xmit = mlx5e_xmit,
+ .ndo_get_stats64 = mlx5e_get_stats,
+ .ndo_set_rx_mode = mlx5e_set_rx_mode,
+ .ndo_set_mac_address = mlx5e_set_mac,
+ .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
+ .ndo_set_features = mlx5e_set_features,
+ .ndo_change_mtu = mlx5e_change_mtu,
+};
+
+static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
+{
+ if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
+ return -ENOTSUPP;
+ if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
+ !MLX5_CAP_GEN(mdev, nic_flow_table) ||
+ !MLX5_CAP_ETH(mdev, csum_cap) ||
+ !MLX5_CAP_ETH(mdev, max_lso_cap) ||
+ !MLX5_CAP_ETH(mdev, vlan_cap) ||
+ !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap)) {
+ mlx5_core_warn(mdev,
+ "Not creating net device, some required device capabilities are missing\n");
+ return -ENOTSUPP;
+ }
+ return 0;
+}
+
+static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
+ struct net_device *netdev,
+ int num_comp_vectors)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ priv->params.log_sq_size =
+ MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ priv->params.log_rq_size =
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ priv->params.rx_cq_moderation_usec =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
+ priv->params.rx_cq_moderation_pkts =
+ MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
+ priv->params.tx_cq_moderation_usec =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
+ priv->params.tx_cq_moderation_pkts =
+ MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
+ priv->params.min_rx_wqes =
+ MLX5E_PARAMS_DEFAULT_MIN_RX_WQES;
+ priv->params.rx_hash_log_tbl_sz =
+ (order_base_2(num_comp_vectors) >
+ MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ) ?
+ order_base_2(num_comp_vectors) :
+ MLX5E_PARAMS_DEFAULT_RX_HASH_LOG_TBL_SZ;
+ priv->params.num_tc = 1;
+ priv->params.default_vlan_prio = 0;
+
+ /* LRO is off by default; lro_cap is still reflected in hw_features */
+ priv->params.lro_en = false;
+ priv->params.lro_wqe_sz =
+ MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+
+ priv->mdev = mdev;
+ priv->netdev = netdev;
+ priv->params.num_channels = num_comp_vectors;
+ priv->order_base_2_num_channels = order_base_2(num_comp_vectors);
+ priv->queue_mapping_channel_mask =
+ roundup_pow_of_two(num_comp_vectors) - 1;
+ priv->num_tc = priv->params.num_tc;
+ priv->default_vlan_prio = priv->params.default_vlan_prio;
+
+ spin_lock_init(&priv->async_events_spinlock);
+ mutex_init(&priv->state_lock);
+
+ INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
+ INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
+ INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
+}
+
+static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+
+ mlx5_query_vport_mac_address(priv->mdev, netdev->dev_addr);
+}
+
+static void mlx5e_build_netdev(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ struct mlx5_core_dev *mdev = priv->mdev;
+
+ SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
+
+ if (priv->num_tc > 1) {
+ mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
+ mlx5e_netdev_ops.ndo_start_xmit = mlx5e_xmit_multi_tc;
+ }
+
+ netdev->netdev_ops = &mlx5e_netdev_ops;
+ netdev->watchdog_timeo = 15 * HZ;
+
+ netdev->ethtool_ops = &mlx5e_ethtool_ops;
+
+ netdev->vlan_features |= NETIF_F_IP_CSUM;
+ netdev->vlan_features |= NETIF_F_IPV6_CSUM;
+ netdev->vlan_features |= NETIF_F_GRO;
+ netdev->vlan_features |= NETIF_F_TSO;
+ netdev->vlan_features |= NETIF_F_TSO6;
+ netdev->vlan_features |= NETIF_F_RXCSUM;
+ netdev->vlan_features |= NETIF_F_RXHASH;
+
+ if (!!MLX5_CAP_ETH(mdev, lro_cap))
+ netdev->vlan_features |= NETIF_F_LRO;
+
+ netdev->hw_features = netdev->vlan_features;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
+ netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+ netdev->features = netdev->hw_features;
+ if (!priv->params.lro_en)
+ netdev->features &= ~NETIF_F_LRO;
+
+ netdev->features |= NETIF_F_HIGHDMA;
+
+ netdev->priv_flags |= IFF_UNICAST_FLT;
+
+ mlx5e_set_netdev_dev_addr(netdev);
+}
+
+static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
+ struct mlx5_core_mr *mr)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_create_mkey_mbox_in *in;
+ int err;
+
+ in = mlx5_vzalloc(sizeof(*in));
+ if (!in)
+ return -ENOMEM;
+
+ in->seg.flags = MLX5_PERM_LOCAL_WRITE |
+ MLX5_PERM_LOCAL_READ |
+ MLX5_ACCESS_MODE_PA;
+ in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
+ in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
+
+ err = mlx5_core_create_mkey(mdev, mr, in, sizeof(*in), NULL, NULL,
+ NULL);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
+{
+ struct net_device *netdev;
+ struct mlx5e_priv *priv;
+ int ncv = mdev->priv.eq_table.num_comp_vectors;
+ int err;
+
+ if (mlx5e_check_required_hca_cap(mdev))
+ return NULL;
+
+ netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
+ roundup_pow_of_two(ncv) * MLX5E_MAX_NUM_TC,
+ ncv);
+ if (!netdev) {
+ mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
+ return NULL;
+ }
+
+ mlx5e_build_netdev_priv(mdev, netdev, ncv);
+ mlx5e_build_netdev(netdev);
+
+ netif_carrier_off(netdev);
+
+ priv = netdev_priv(netdev);
+
+ err = mlx5_alloc_map_uar(mdev, &priv->cq_uar);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_alloc_map_uar failed, %d\n",
+ __func__, err);
+ goto err_free_netdev;
+ }
+
+ err = mlx5_core_alloc_pd(mdev, &priv->pdn);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5_core_alloc_pd failed, %d\n",
+ __func__, err);
+ goto err_unmap_free_uar;
+ }
+
+ err = mlx5e_create_mkey(priv, priv->pdn, &priv->mr);
+ if (err) {
+ netdev_err(netdev, "%s: mlx5e_create_mkey failed, %d\n",
+ __func__, err);
+ goto err_dealloc_pd;
+ }
+
+ err = register_netdev(netdev);
+ if (err) {
+ netdev_err(netdev, "%s: register_netdev failed, %d\n",
+ __func__, err);
+ goto err_destroy_mkey;
+ }
+
+ mlx5e_enable_async_events(priv);
+
+ return priv;
+
+err_destroy_mkey:
+ mlx5_core_destroy_mkey(mdev, &priv->mr);
+
+err_dealloc_pd:
+ mlx5_core_dealloc_pd(mdev, priv->pdn);
+
+err_unmap_free_uar:
+ mlx5_unmap_free_uar(mdev, &priv->cq_uar);
+
+err_free_netdev:
+ free_netdev(netdev);
+
+ return NULL;
+}
+
+static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+ struct net_device *netdev = priv->netdev;
+
+ unregister_netdev(netdev);
+ mlx5_core_destroy_mkey(priv->mdev, &priv->mr);
+ mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
+ mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
+ mlx5e_disable_async_events(priv);
+ flush_scheduled_work();
+ free_netdev(netdev);
+}
+
+static void *mlx5e_get_netdev(void *vpriv)
+{
+ struct mlx5e_priv *priv = vpriv;
+
+ return priv->netdev;
+}
+
+static struct mlx5_interface mlx5e_interface = {
+ .add = mlx5e_create_netdev,
+ .remove = mlx5e_destroy_netdev,
+ .event = mlx5e_async_event,
+ .protocol = MLX5_INTERFACE_PROTOCOL_ETH,
+ .get_dev = mlx5e_get_netdev,
+};
+
+void mlx5e_init(void)
+{
+ mlx5_register_interface(&mlx5e_interface);
+}
+
+void mlx5e_cleanup(void)
+{
+ mlx5_unregister_interface(&mlx5e_interface);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
new file mode 100644
index 0000000..ce1317c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -0,0 +1,249 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/tcp.h>
+#include "en.h"
+
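+/* Allocate an skb for one RX WQE: reserve NET_IP_ALIGN so the IP header
+ * lands aligned, DMA-map from the padded start, stash the DMA address in
+ * skb->cb for the completion path, and point the WQE at the data start.
+ */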
+static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq,
+ struct mlx5e_rx_wqe *wqe, u16 ix)
+{
+ struct sk_buff *skb;
+ dma_addr_t dma_addr;
+
+ skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz);
+ if (unlikely(!skb))
+ return -ENOMEM;
+
+ skb_reserve(skb, MLX5E_NET_IP_ALIGN);
+
+ dma_addr = dma_map_single(rq->pdev,
+ /* hw start padding */
+ skb->data - MLX5E_NET_IP_ALIGN,
+ /* hw end padding */
+ rq->wqe_sz,
+ DMA_FROM_DEVICE);
+
+ if (unlikely(dma_mapping_error(rq->pdev, dma_addr)))
+ goto err_free_skb;
+
+ *((dma_addr_t *)skb->cb) = dma_addr;
+ wqe->data.addr = cpu_to_be64(dma_addr + MLX5E_NET_IP_ALIGN);
+
+ rq->skb[ix] = skb;
+
+ return 0;
+
+err_free_skb:
+ dev_kfree_skb(skb);
+
+ return -ENOMEM;
+}
+
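+/* Refill the RQ until it is full or an skb allocation fails; called from
+ * NAPI. Returns true while the ring is still not full, so the poller
+ * keeps rescheduling itself until the refill succeeds.
+ */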
+bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
+{
+ struct mlx5_wq_ll *wq = &rq->wq;
+
+ if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state)))
+ return false;
+
+ while (!mlx5_wq_ll_is_full(wq)) {
+ struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(wq, wq->head);
+
+ if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head)))
+ break;
+
+ mlx5_wq_ll_push(wq, be16_to_cpu(wqe->next.next_wqe_index));
+ }
+
+ /* ensure wqes are visible to device before updating doorbell record */
+ dma_wmb();
+
+ mlx5_wq_ll_update_db_record(wq);
+
+ return !mlx5_wq_ll_is_full(wq);
+}
+
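+/* Rewrite the headers of an LRO-aggregated skb from CQE fields: TCP
+ * psh/ack/window, IP TTL and total length, recomputing the IPv4 header
+ * checksum (IPv6 carries no header checksum).
+ */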
+static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe)
+{
+ struct ethhdr *eth = (struct ethhdr *)(skb->data);
+ struct iphdr *ipv4 = (struct iphdr *)(skb->data + ETH_HLEN);
+ struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
+ struct tcphdr *tcp;
+
+ u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
+ int tcp_ack = ((CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA == l4_hdr_type) ||
+ (CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA == l4_hdr_type));
+
+ u16 tot_len = be32_to_cpu(cqe->byte_cnt) - ETH_HLEN;
+
+ if (eth->h_proto == htons(ETH_P_IP)) {
+ tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ sizeof(struct iphdr));
+ ipv6 = NULL;
+ } else {
+ tcp = (struct tcphdr *)(skb->data + ETH_HLEN +
+ sizeof(struct ipv6hdr));
+ ipv4 = NULL;
+ }
+
+ if (get_cqe_lro_tcppsh(cqe))
+ tcp->psh = 1;
+
+ if (tcp_ack) {
+ tcp->ack = 1;
+ tcp->ack_seq = cqe->lro_ack_seq_num;
+ tcp->window = cqe->lro_tcp_win;
+ }
+
+ if (ipv4) {
+ ipv4->ttl = cqe->lro_min_ttl;
+ ipv4->tot_len = cpu_to_be16(tot_len);
+ ipv4->check = 0;
+ ipv4->check = ip_fast_csum((unsigned char *)ipv4,
+ ipv4->ihl);
+ } else {
+ ipv6->hop_limit = cqe->lro_min_ttl;
+ ipv6->payload_len = cpu_to_be16(tot_len -
+ sizeof(struct ipv6hdr));
+ }
+}
+
+static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
+ struct sk_buff *skb)
+{
+ u8 cht = cqe->rss_hash_type;
+ int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
+ (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
+ PKT_HASH_TYPE_NONE;
+ skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
+}
+
+static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
+ struct mlx5e_rq *rq,
+ struct sk_buff *skb)
+{
+ struct net_device *netdev = rq->netdev;
+ u32 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
+ int lro_num_seg;
+
+ skb_put(skb, cqe_bcnt);
+
+ lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
+ if (lro_num_seg > 1) {
+ mlx5e_lro_update_hdr(skb, cqe);
+ skb_shinfo(skb)->gso_size = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
+ rq->stats.lro_packets++;
+ rq->stats.lro_bytes += cqe_bcnt;
+ }
+
+ if (likely(netdev->features & NETIF_F_RXCSUM) &&
+ (cqe->hds_ip_ext & CQE_L2_OK) &&
+ (cqe->hds_ip_ext & CQE_L3_OK) &&
+ (cqe->hds_ip_ext & CQE_L4_OK)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ rq->stats.csum_none++;
+ }
+
+ skb->protocol = eth_type_trans(skb, netdev);
+
+ skb_record_rx_queue(skb, rq->ix);
+
+ if (likely(netdev->features & NETIF_F_RXHASH))
+ mlx5e_skb_set_hash(cqe, skb);
+
+ if (cqe_has_vlan(cqe))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ be16_to_cpu(cqe->vlan_info));
+}
+
+bool mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
+{
+ struct mlx5e_rq *rq = cq->sqrq;
+ int i;
+
+ /* avoid accessing cq (dma coherent memory) if not needed */
+ if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+ return false;
+
+ for (i = 0; i < budget; i++) {
+ struct mlx5e_rx_wqe *wqe;
+ struct mlx5_cqe64 *cqe;
+ struct sk_buff *skb;
+ __be16 wqe_counter_be;
+ u16 wqe_counter;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ wqe_counter_be = cqe->wqe_counter;
+ wqe_counter = be16_to_cpu(wqe_counter_be);
+ wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_counter);
+ skb = rq->skb[wqe_counter];
+ rq->skb[wqe_counter] = NULL;
+
+ dma_unmap_single(rq->pdev,
+ *((dma_addr_t *)skb->cb),
+ rq->wqe_sz, /* must match the size mapped in mlx5e_alloc_rx_wqe() */
+ DMA_FROM_DEVICE);
+
+ if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) {
+ rq->stats.wqe_err++;
+ dev_kfree_skb(skb);
+ goto wq_ll_pop;
+ }
+
+ mlx5e_build_rx_skb(cqe, rq, skb);
+ rq->stats.packets++;
+ napi_gro_receive(cq->napi, skb);
+
+wq_ll_pop:
+ mlx5_wq_ll_pop(&rq->wq, wqe_counter_be,
+ &wqe->next.next_wqe_index);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ if (i == budget) {
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
new file mode 100644
index 0000000..8020986
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include "en.h"
+
+static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
+ u32 *size)
+{
+ sq->dma_fifo_pc--;
+ *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
+ *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+ dma_addr_t addr;
+ u32 size;
+ int i;
+
+ for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+ mlx5e_dma_pop_last_pushed(sq, &addr, &size);
+ dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+ }
+}
+
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
+ u32 size)
+{
+ sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
+ sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+ sq->dma_fifo_pc++;
+}
+
+static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
+ u32 *size)
+{
+ *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
+ *size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+}
+
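+/* Map an skb to a TX queue: the low bits of queue_mapping select the
+ * channel (via the stack's fallback hash) and the high bits select the
+ * traffic class derived from the VLAN priority (or the device default).
+ */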
+u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
+ void *accel_priv, select_queue_fallback_t fallback)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int channel_ix = fallback(dev, skb);
+ int up = skb_vlan_tag_present(skb) ?
+ skb->vlan_tci >> VLAN_PRIO_SHIFT :
+ priv->default_vlan_prio;
+ int tc = netdev_get_prio_tc_map(dev, up);
+
+ return (tc << priv->order_base_2_num_channels) | channel_ix;
+}
+
+static inline u16 mlx5e_get_inline_hdr_size(struct mlx5e_sq *sq,
+ struct sk_buff *skb)
+{
+#define MLX5E_MIN_INLINE 16 /* eth header with vlan (w/o next ethertype) */
+ return MLX5E_MIN_INLINE;
+}
+
+static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs)
+{
+ struct vlan_ethhdr *vhdr = (struct vlan_ethhdr *)start;
+ int cpy1_sz = 2 * ETH_ALEN;
+ int cpy2_sz = ihs - cpy1_sz - VLAN_HLEN;
+
+ skb_copy_from_linear_data(skb, vhdr, cpy1_sz);
+ skb_pull_inline(skb, cpy1_sz);
+ vhdr->h_vlan_proto = skb->vlan_proto;
+ vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb));
+ skb_copy_from_linear_data(skb, &vhdr->h_vlan_encapsulated_proto,
+ cpy2_sz);
+ skb_pull_inline(skb, cpy2_sz);
+}
+
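+/* Build one TX WQE: control segment, eth segment with inlined headers
+ * (the VLAN tag is inserted inline when present), then data segments for
+ * the linear part and each frag. The queue is stopped while fewer than
+ * MLX5_SEND_WQE_MAX_WQEBBS slots remain, and the doorbell is deferred
+ * when xmit_more indicates further packets are coming.
+ */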
+static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
+{
+ struct mlx5_wq_cyc *wq = &sq->wq;
+
+ u16 pi = sq->pc & wq->sz_m1;
+ struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi);
+
+ struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
+ struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
+ struct mlx5_wqe_data_seg *dseg;
+
+ u8 opcode = MLX5_OPCODE_SEND;
+ dma_addr_t dma_addr = 0;
+ u16 headlen;
+ u16 ds_cnt;
+ u16 ihs;
+ int i;
+
+ memset(wqe, 0, sizeof(*wqe));
+
+ if (likely(skb->ip_summed == CHECKSUM_PARTIAL))
+ eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+ else
+ sq->stats.csum_offload_none++;
+
+ if (skb_is_gso(skb)) {
+ u32 payload_len;
+ int num_pkts;
+
+ eseg->mss = cpu_to_be16(skb_shinfo(skb)->gso_size);
+ opcode = MLX5_OPCODE_LSO;
+ ihs = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ payload_len = skb->len - ihs;
+ num_pkts = (payload_len / skb_shinfo(skb)->gso_size) +
+ !!(payload_len % skb_shinfo(skb)->gso_size);
+ MLX5E_TX_SKB_CB(skb)->num_bytes = skb->len +
+ (num_pkts - 1) * ihs;
+ sq->stats.tso_packets++;
+ sq->stats.tso_bytes += payload_len;
+ } else {
+ ihs = mlx5e_get_inline_hdr_size(sq, skb);
+ MLX5E_TX_SKB_CB(skb)->num_bytes = max_t(unsigned int, skb->len,
+ ETH_ZLEN);
+ }
+
+ if (skb_vlan_tag_present(skb)) {
+ mlx5e_insert_vlan(eseg->inline_hdr_start, skb, ihs);
+ } else {
+ skb_copy_from_linear_data(skb, eseg->inline_hdr_start, ihs);
+ skb_pull_inline(skb, ihs);
+ }
+
+ eseg->inline_hdr_sz = cpu_to_be16(ihs);
+
+ ds_cnt = sizeof(*wqe) / MLX5_SEND_WQE_DS;
+ ds_cnt += DIV_ROUND_UP(ihs - sizeof(eseg->inline_hdr_start),
+ MLX5_SEND_WQE_DS);
+ dseg = (struct mlx5_wqe_data_seg *)cseg + ds_cnt;
+
+ MLX5E_TX_SKB_CB(skb)->num_dma = 0;
+
+ headlen = skb_headlen(skb);
+ if (headlen) {
+ dma_addr = dma_map_single(sq->pdev, skb->data, headlen,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+ goto dma_unmap_wqe_err;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->lkey = sq->mkey_be;
+ dseg->byte_count = cpu_to_be32(headlen);
+
+ mlx5e_dma_push(sq, dma_addr, headlen);
+ MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+ dseg++;
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+ int fsz = skb_frag_size(frag);
+
+ dma_addr = skb_frag_dma_map(sq->pdev, frag, 0, fsz,
+ DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(sq->pdev, dma_addr)))
+ goto dma_unmap_wqe_err;
+
+ dseg->addr = cpu_to_be64(dma_addr);
+ dseg->lkey = sq->mkey_be;
+ dseg->byte_count = cpu_to_be32(fsz);
+
+ mlx5e_dma_push(sq, dma_addr, fsz);
+ MLX5E_TX_SKB_CB(skb)->num_dma++;
+
+ dseg++;
+ }
+
+ ds_cnt += MLX5E_TX_SKB_CB(skb)->num_dma;
+
+ cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | opcode);
+ cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
+ cseg->fm_ce_se = MLX5_WQE_CTRL_CQ_UPDATE;
+
+ sq->skb[pi] = skb;
+
+ MLX5E_TX_SKB_CB(skb)->num_wqebbs = DIV_ROUND_UP(ds_cnt,
+ MLX5_SEND_WQEBB_NUM_DS);
+ sq->pc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+
+ netdev_tx_sent_queue(sq->txq, MLX5E_TX_SKB_CB(skb)->num_bytes);
+
+ if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS))) {
+ netif_tx_stop_queue(sq->txq);
+ sq->stats.stopped++;
+ }
+
+ if (!skb->xmit_more || netif_xmit_stopped(sq->txq))
+ mlx5e_tx_notify_hw(sq, wqe);
+
+ sq->stats.packets++;
+ return NETDEV_TX_OK;
+
+dma_unmap_wqe_err:
+ sq->stats.dropped++;
+ mlx5e_dma_unmap_wqe_err(sq, skb);
+
+ dev_kfree_skb_any(skb);
+
+ return NETDEV_TX_OK;
+}
+
+netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ix = skb->queue_mapping;
+ int tc = 0;
+ struct mlx5e_channel *c = priv->channel[ix];
+ struct mlx5e_sq *sq = &c->sq[tc];
+
+ return mlx5e_sq_xmit(sq, skb);
+}
+
+netdev_tx_t mlx5e_xmit_multi_tc(struct sk_buff *skb, struct net_device *dev)
+{
+ struct mlx5e_priv *priv = netdev_priv(dev);
+ int ix = skb->queue_mapping & priv->queue_mapping_channel_mask;
+ int tc = skb->queue_mapping >> priv->order_base_2_num_channels;
+ struct mlx5e_channel *c = priv->channel[ix];
+ struct mlx5e_sq *sq = &c->sq[tc];
+
+ return mlx5e_sq_xmit(sq, skb);
+}
+
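+/* Reap TX completions: for each CQE, unmap the skb's DMA addresses from
+ * the fifo, free the skb, and account packets/bytes for BQL. The queue is
+ * woken once enough room is back and TX wakeups are enabled.
+ */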
+bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
+{
+ struct mlx5e_sq *sq;
+ u32 dma_fifo_cc;
+ u32 nbytes;
+ u16 npkts;
+ u16 sqcc;
+ int i;
+
+ /* avoid accessing cq (dma coherent memory) if not needed */
+ if (!test_and_clear_bit(MLX5E_CQ_HAS_CQES, &cq->flags))
+ return false;
+
+ sq = cq->sqrq;
+
+ npkts = 0;
+ nbytes = 0;
+
+ /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
+ * otherwise a cq overrun may occur
+ */
+ sqcc = sq->cc;
+
+ /* avoid dirtying sq cache line every cqe */
+ dma_fifo_cc = sq->dma_fifo_cc;
+
+ for (i = 0; i < MLX5E_TX_CQ_POLL_BUDGET; i++) {
+ struct mlx5_cqe64 *cqe;
+ struct sk_buff *skb;
+ u16 ci;
+ int j;
+
+ cqe = mlx5e_get_cqe(cq);
+ if (!cqe)
+ break;
+
+ ci = sqcc & sq->wq.sz_m1;
+ skb = sq->skb[ci];
+
+ if (unlikely(!skb)) { /* nop completion, no skb to free */
+ sq->stats.nop++;
+ sqcc++;
+ continue;
+ }
+
+ for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
+ dma_addr_t addr;
+ u32 size;
+
+ mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
+ dma_fifo_cc++;
+ dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+ }
+
+ npkts++;
+ nbytes += MLX5E_TX_SKB_CB(skb)->num_bytes;
+ sqcc += MLX5E_TX_SKB_CB(skb)->num_wqebbs;
+
+ dev_kfree_skb(skb);
+ }
+
+ mlx5_cqwq_update_db_record(&cq->wq);
+
+ /* ensure cq space is freed before enabling more cqes */
+ wmb();
+
+ sq->dma_fifo_cc = dma_fifo_cc;
+ sq->cc = sqcc;
+
+ netdev_tx_completed_queue(sq->txq, npkts, nbytes);
+
+ if (netif_tx_queue_stopped(sq->txq) &&
+ mlx5e_sq_has_room_for(sq, MLX5_SEND_WQE_MAX_WQEBBS) &&
+ likely(test_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state))) {
+ netif_tx_wake_queue(sq->txq);
+ sq->stats.wake++;
+ }
+ if (i == MLX5E_TX_CQ_POLL_BUDGET) {
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ return true;
+ }
+
+ return false;
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
new file mode 100644
index 0000000..088bc42
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include "en.h"
+
+struct mlx5_cqe64 *mlx5e_get_cqe(struct mlx5e_cq *cq)
+{
+ struct mlx5_cqwq *wq = &cq->wq;
+ u32 ci = mlx5_cqwq_get_ci(wq);
+ struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
+ int cqe_ownership_bit = cqe->op_own & MLX5_CQE_OWNER_MASK;
+ int sw_ownership_val = mlx5_cqwq_get_wrap_cnt(wq) & 1;
+
+ if (cqe_ownership_bit != sw_ownership_val)
+ return NULL;
+
+ mlx5_cqwq_pop(wq);
+
+ /* ensure cqe content is read after cqe ownership bit */
+ rmb();
+
+ return cqe;
+}
+
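+/* Channel NAPI handler: poll every SQ CQ and the RQ CQ, refill the RQ,
+ * and stay scheduled while any of them reports more work. CQs are
+ * re-armed only after an idle pass; a completion that raced in between
+ * is caught via the NAPI_SCHED flag and triggers a reschedule.
+ */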
+int mlx5e_napi_poll(struct napi_struct *napi, int budget)
+{
+ struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel,
+ napi);
+ bool busy = false;
+ int i;
+
+ clear_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags);
+
+ for (i = 0; i < c->num_tc; i++)
+ busy |= mlx5e_poll_tx_cq(&c->sq[i].cq);
+
+ busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget);
+
+ busy |= mlx5e_post_rx_wqes(c->rq.cq.sqrq);
+
+ if (busy)
+ return budget;
+
+ napi_complete(napi);
+
+ /* avoid losing completion event during/after polling cqs */
+ if (test_bit(MLX5E_CHANNEL_NAPI_SCHED, &c->flags)) {
+ napi_schedule(napi);
+ return 0;
+ }
+
+ for (i = 0; i < c->num_tc; i++)
+ mlx5e_cq_arm(&c->sq[i].cq);
+ mlx5e_cq_arm(&c->rq.cq);
+
+ return 0;
+}
+
+void mlx5e_completion_event(struct mlx5_core_cq *mcq)
+{
+ struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+
+ set_bit(MLX5E_CQ_HAS_CQES, &cq->flags);
+ set_bit(MLX5E_CHANNEL_NAPI_SCHED, &cq->channel->flags);
+ barrier();
+ napi_schedule(cq->napi);
+}
+
+void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event)
+{
+ struct mlx5e_cq *cq = container_of(mcq, struct mlx5e_cq, mcq);
+ struct mlx5e_channel *c = cq->channel;
+ struct mlx5e_priv *priv = c->priv;
+ struct net_device *netdev = priv->netdev;
+
+ netdev_err(netdev, "%s: cqn=0x%.6x event=0x%.2x\n",
+ __func__, mcq->cqn, event);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 58800e4..a40b96d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -339,15 +339,14 @@ static void init_eq_buf(struct mlx5_eq *eq)
int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
int nent, u64 mask, const char *name, struct mlx5_uar *uar)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_priv *priv = &dev->priv;
struct mlx5_create_eq_mbox_in *in;
struct mlx5_create_eq_mbox_out out;
int err;
int inlen;
eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE);
- err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, 2 * PAGE_SIZE,
- &eq->buf);
+ err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);
if (err)
return err;
@@ -378,14 +377,15 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
goto err_in;
}
- snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
+ snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s",
name, pci_name(dev->pdev));
+
eq->eqn = out.eq_number;
eq->irqn = vecidx;
eq->dev = dev;
eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
- err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
- eq->name, eq);
+ err = request_irq(priv->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
+ priv->irq_info[vecidx].name, eq);
if (err)
goto err_eq;
@@ -401,7 +401,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
return 0;
err_irq:
- free_irq(table->msix_arr[vecidx].vector, eq);
+ free_irq(priv->msix_arr[vecidx].vector, eq);
err_eq:
mlx5_cmd_destroy_eq(dev, eq->eqn);
@@ -417,16 +417,15 @@ EXPORT_SYMBOL_GPL(mlx5_create_map_eq);
int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
int err;
mlx5_debug_eq_remove(dev, eq);
- free_irq(table->msix_arr[eq->irqn].vector, eq);
+ free_irq(dev->priv.msix_arr[eq->irqn].vector, eq);
err = mlx5_cmd_destroy_eq(dev, eq->eqn);
if (err)
mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
eq->eqn);
- synchronize_irq(table->msix_arr[eq->irqn].vector);
+ synchronize_irq(dev->priv.msix_arr[eq->irqn].vector);
mlx5_buf_free(dev, &eq->buf);
return err;
@@ -456,7 +455,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
u32 async_event_mask = MLX5_ASYNC_EVENT_MASK;
int err;
- if (dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG)
+ if (MLX5_CAP_GEN(dev, pg))
async_event_mask |= (1ull << MLX5_EVENT_TYPE_PAGE_FAULT);
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
@@ -479,7 +478,7 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
err = mlx5_create_map_eq(dev, &table->pages_eq,
MLX5_EQ_VEC_PAGES,
- dev->caps.gen.max_vf + 1,
+ /* TODO: sriov max_vf + */ 1,
1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq",
&dev->priv.uuari.uars[0]);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
new file mode 100644
index 0000000..ca90b9b
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/flow_table.c
@@ -0,0 +1,422 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/mlx5/driver.h>
+#include <linux/mlx5/flow_table.h>
+#include "mlx5_core.h"
+
+struct mlx5_ftg {
+ struct mlx5_flow_table_group g;
+ u32 id;
+ u32 start_ix;
+};
+
+struct mlx5_flow_table {
+ struct mlx5_core_dev *dev;
+ u8 level;
+ u8 type;
+ u32 id;
+ struct mutex mutex; /* sync bitmap alloc */
+ u16 num_groups;
+ struct mlx5_ftg *group;
+ unsigned long *bitmap;
+ u32 size;
+};
+
+static int mlx5_set_flow_entry_cmd(struct mlx5_flow_table *ft, u32 group_ix,
+ u32 flow_index, void *flow_context)
+{
+ u32 out[MLX5_ST_SZ_DW(set_fte_out)];
+ u32 *in;
+ void *in_flow_context;
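+ /* input = set_fte header + flow_context + variable-size destination list */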
+ int fcdls =
+ MLX5_GET(flow_context, flow_context, destination_list_size) *
+ MLX5_ST_SZ_BYTES(dest_format_struct);
+ int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fcdls;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+
+ MLX5_SET(set_fte_in, in, table_type, ft->type);
+ MLX5_SET(set_fte_in, in, table_id, ft->id);
+ MLX5_SET(set_fte_in, in, flow_index, flow_index);
+ MLX5_SET(set_fte_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY);
+
+ in_flow_context = MLX5_ADDR_OF(set_fte_in, in, flow_context);
+ memcpy(in_flow_context, flow_context,
+ MLX5_ST_SZ_BYTES(flow_context) + fcdls);
+
+ MLX5_SET(flow_context, in_flow_context, group_id,
+ ft->group[group_ix].id);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+ sizeof(out));
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_del_flow_entry_cmd(struct mlx5_flow_table *ft, u32 flow_index)
+{
+ u32 in[MLX5_ST_SZ_DW(delete_fte_in)];
+ u32 out[MLX5_ST_SZ_DW(delete_fte_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTEI(p, x, v) MLX5_SET(delete_fte_in, p, x, v)
+ MLX5_SET_DFTEI(in, table_type, ft->type);
+ MLX5_SET_DFTEI(in, table_id, ft->id);
+ MLX5_SET_DFTEI(in, flow_index, flow_index);
+ MLX5_SET_DFTEI(in, opcode, MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
+
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static void mlx5_destroy_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_group_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_group_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFGI(p, x, v) MLX5_SET(destroy_flow_group_in, p, x, v)
+ MLX5_SET_DFGI(in, table_type, ft->type);
+ MLX5_SET_DFGI(in, table_id, ft->id);
+ MLX5_SET_DFGI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_GROUP);
+ MLX5_SET_DFGI(in, group_id, ft->group[i].id);
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
+static int mlx5_create_flow_group_cmd(struct mlx5_flow_table *ft, int i)
+{
+ u32 out[MLX5_ST_SZ_DW(create_flow_group_out)];
+ u32 *in;
+ void *in_match_criteria;
+ int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ struct mlx5_flow_table_group *g = &ft->group[i].g;
+ u32 start_ix = ft->group[i].start_ix;
+ u32 end_ix = start_ix + (1 << g->log_sz) - 1;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in) {
+ mlx5_core_warn(ft->dev, "failed to allocate inbox\n");
+ return -ENOMEM;
+ }
+ in_match_criteria = MLX5_ADDR_OF(create_flow_group_in, in,
+ match_criteria);
+
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_CFGI(p, x, v) MLX5_SET(create_flow_group_in, p, x, v)
+ MLX5_SET_CFGI(in, table_type, ft->type);
+ MLX5_SET_CFGI(in, table_id, ft->id);
+ MLX5_SET_CFGI(in, opcode, MLX5_CMD_OP_CREATE_FLOW_GROUP);
+ MLX5_SET_CFGI(in, start_flow_index, start_ix);
+ MLX5_SET_CFGI(in, end_flow_index, end_ix);
+ MLX5_SET_CFGI(in, match_criteria_enable, g->match_criteria_enable);
+
+ memcpy(in_match_criteria, g->match_criteria,
+ MLX5_ST_SZ_BYTES(fte_match_param));
+
+ err = mlx5_cmd_exec_check_status(ft->dev, in, inlen, out,
+ sizeof(out));
+ if (!err)
+ ft->group[i].id = MLX5_GET(create_flow_group_out, out,
+ group_id);
+
+ kvfree(in);
+
+ return err;
+}
+
+static void mlx5_destroy_flow_table_groups(struct mlx5_flow_table *ft)
+{
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++)
+ mlx5_destroy_flow_group_cmd(ft, i);
+}
+
+static int mlx5_create_flow_table_groups(struct mlx5_flow_table *ft)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++) {
+ err = mlx5_create_flow_group_cmd(ft, i);
+ if (err)
+ goto err_destroy_flow_table_groups;
+ }
+
+ return 0;
+
+err_destroy_flow_table_groups:
+ for (i--; i >= 0; i--)
+ mlx5_destroy_flow_group_cmd(ft, i);
+
+ return err;
+}
+
+static int mlx5_create_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+ u32 in[MLX5_ST_SZ_DW(create_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(create_flow_table_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(create_flow_table_in, in, table_type, ft->type);
+ MLX5_SET(create_flow_table_in, in, level, ft->level);
+ MLX5_SET(create_flow_table_in, in, log_size, order_base_2(ft->size));
+
+ MLX5_SET(create_flow_table_in, in, opcode,
+ MLX5_CMD_OP_CREATE_FLOW_TABLE);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ return err;
+
+ ft->id = MLX5_GET(create_flow_table_out, out, table_id);
+
+ return 0;
+}
+
+static void mlx5_destroy_flow_table_cmd(struct mlx5_flow_table *ft)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_flow_table_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_flow_table_out)];
+
+ memset(in, 0, sizeof(in));
+ memset(out, 0, sizeof(out));
+
+#define MLX5_SET_DFTI(p, x, v) MLX5_SET(destroy_flow_table_in, p, x, v)
+ MLX5_SET_DFTI(in, table_type, ft->type);
+ MLX5_SET_DFTI(in, table_id, ft->id);
+ MLX5_SET_DFTI(in, opcode, MLX5_CMD_OP_DESTROY_FLOW_TABLE);
+
+ mlx5_cmd_exec_check_status(ft->dev, in, sizeof(in), out, sizeof(out));
+}
+
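+/* return the index of the first group whose criteria enable bits and match
+ * criteria are identical to the requested ones
+ */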
+static int mlx5_find_group(struct mlx5_flow_table *ft, u8 match_criteria_enable,
+ u32 *match_criteria, int *group_ix)
+{
+ void *mc_outer = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ outer_headers);
+ void *mc_misc = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ misc_parameters);
+ void *mc_inner = MLX5_ADDR_OF(fte_match_param, match_criteria,
+ inner_headers);
+ int mc_outer_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+ int mc_misc_sz = MLX5_ST_SZ_BYTES(fte_match_set_misc);
+ int mc_inner_sz = MLX5_ST_SZ_BYTES(fte_match_set_lyr_2_4);
+ int i;
+
+ for (i = 0; i < ft->num_groups; i++) {
+ struct mlx5_flow_table_group *g = &ft->group[i].g;
+ void *gmc_outer = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ outer_headers);
+ void *gmc_misc = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ misc_parameters);
+ void *gmc_inner = MLX5_ADDR_OF(fte_match_param,
+ g->match_criteria,
+ inner_headers);
+
+ if (g->match_criteria_enable != match_criteria_enable)
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_OUTER_HEADERS)
+ if (memcmp(mc_outer, gmc_outer, mc_outer_sz))
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_MISC_PARAMETERS)
+ if (memcmp(mc_misc, gmc_misc, mc_misc_sz))
+ continue;
+
+ if (match_criteria_enable & MLX5_MATCH_INNER_HEADERS)
+ if (memcmp(mc_inner, gmc_inner, mc_inner_sz))
+ continue;
+
+ *group_ix = i;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
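+/* allocate the lowest free flow index within the group's index range */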
+static int alloc_flow_index(struct mlx5_flow_table *ft, int group_ix, u32 *ix)
+{
+ struct mlx5_ftg *g = &ft->group[group_ix];
+ int err = 0;
+
+ mutex_lock(&ft->mutex);
+
+ *ix = find_next_zero_bit(ft->bitmap, ft->size, g->start_ix);
+ if (*ix >= (g->start_ix + (1 << g->g.log_sz)))
+ err = -ENOSPC;
+ else
+ __set_bit(*ix, ft->bitmap);
+
+ mutex_unlock(&ft->mutex);
+
+ return err;
+}
+
+static void mlx5_free_flow_index(struct mlx5_flow_table *ft, u32 ix)
+{
+ __clear_bit(ix, ft->bitmap);
+}
+
+int mlx5_add_flow_table_entry(void *flow_table, u8 match_criteria_enable,
+ void *match_criteria, void *flow_context,
+ u32 *flow_index)
+{
+ struct mlx5_flow_table *ft = flow_table;
+ int group_ix;
+ int err;
+
+ err = mlx5_find_group(ft, match_criteria_enable, match_criteria,
+ &group_ix);
+ if (err) {
+ mlx5_core_warn(ft->dev, "mlx5_find_group failed\n");
+ return err;
+ }
+
+ err = alloc_flow_index(ft, group_ix, flow_index);
+ if (err) {
+ mlx5_core_warn(ft->dev, "alloc_flow_index failed\n");
+ return err;
+ }
+
+ return mlx5_set_flow_entry_cmd(ft, group_ix, *flow_index, flow_context);
+}
+EXPORT_SYMBOL(mlx5_add_flow_table_entry);
+
+void mlx5_del_flow_table_entry(void *flow_table, u32 flow_index)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ mlx5_del_flow_entry_cmd(ft, flow_index);
+ mlx5_free_flow_index(ft, flow_index);
+}
+EXPORT_SYMBOL(mlx5_del_flow_table_entry);
+
+void *mlx5_create_flow_table(struct mlx5_core_dev *dev, u8 level, u8 table_type,
+ u16 num_groups,
+ struct mlx5_flow_table_group *group)
+{
+ struct mlx5_flow_table *ft;
+ u32 start_ix = 0;
+ u32 ft_size = 0;
+ void *gr;
+ void *bm;
+ int err;
+ int i;
+
+ for (i = 0; i < num_groups; i++)
+ ft_size += (1 << group[i].log_sz);
+
+ ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+ gr = kcalloc(num_groups, sizeof(struct mlx5_ftg), GFP_KERNEL);
+ bm = kcalloc(BITS_TO_LONGS(ft_size), sizeof(uintptr_t), GFP_KERNEL);
+ if (!ft || !gr || !bm)
+ goto err_free_ft;
+
+ ft->group = gr;
+ ft->bitmap = bm;
+ ft->num_groups = num_groups;
+ ft->level = level;
+ ft->type = table_type;
+ ft->size = ft_size;
+ ft->dev = dev;
+ mutex_init(&ft->mutex);
+
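+ /* groups are laid out back to back; each owns 2^log_sz consecutive
+ * flow indices starting at start_ix
+ */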
+ for (i = 0; i < ft->num_groups; i++) {
+ memcpy(&ft->group[i].g, &group[i], sizeof(*group));
+ ft->group[i].start_ix = start_ix;
+ start_ix += 1 << group[i].log_sz;
+ }
+
+ err = mlx5_create_flow_table_cmd(ft);
+ if (err)
+ goto err_free_ft;
+
+ err = mlx5_create_flow_table_groups(ft);
+ if (err)
+ goto err_destroy_flow_table_cmd;
+
+ return ft;
+
+err_destroy_flow_table_cmd:
+ mlx5_destroy_flow_table_cmd(ft);
+
+err_free_ft:
+ mlx5_core_warn(dev, "failed to alloc flow table\n");
+ kfree(bm);
+ kfree(gr);
+ kfree(ft);
+
+ return NULL;
+}
+EXPORT_SYMBOL(mlx5_create_flow_table);
+
+void mlx5_destroy_flow_table(void *flow_table)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ mlx5_destroy_flow_table_groups(ft);
+ mlx5_destroy_flow_table_cmd(ft);
+ kfree(ft->bitmap);
+ kfree(ft->group);
+ kfree(ft);
+}
+EXPORT_SYMBOL(mlx5_destroy_flow_table);
+
+u32 mlx5_get_flow_table_id(void *flow_table)
+{
+ struct mlx5_flow_table *ft = flow_table;
+
+ return ft->id;
+}
+EXPORT_SYMBOL(mlx5_get_flow_table_id);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 4b4cda3..801ccad 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -64,50 +64,74 @@ out_out:
return err;
}
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev, struct mlx5_caps *caps)
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
- return mlx5_core_get_caps(dev, caps, HCA_CAP_OPMOD_GET_CUR);
-}
-
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev, struct mlx5_odp_caps *caps)
-{
- u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
- int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *out;
int err;
- if (!(dev->caps.gen.flags & MLX5_DEV_CAP_FLAG_ON_DMND_PG))
- return -ENOTSUPP;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
- memset(in, 0, sizeof(in));
- out = kzalloc(out_sz, GFP_KERNEL);
- if (!out)
- return -ENOMEM;
- MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
- MLX5_SET(query_hca_cap_in, in, op_mod, HCA_CAP_OPMOD_GET_ODP_CUR);
- err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
if (err)
- goto out;
+ return err;
- err = mlx5_cmd_status_to_err_v2(out);
- if (err) {
- mlx5_core_warn(dev, "query cur hca ODP caps failed, %d\n", err);
- goto out;
+ if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ETHERNET_OFFLOADS,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
}
- memcpy(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct),
- sizeof(*caps));
+ if (MLX5_CAP_GEN(dev, pg)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ODP,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
- mlx5_core_dbg(dev, "on-demand paging capabilities:\nrc: %08x\nuc: %08x\nud: %08x\n",
- be32_to_cpu(caps->per_transport_caps.rc_odp_caps),
- be32_to_cpu(caps->per_transport_caps.uc_odp_caps),
- be32_to_cpu(caps->per_transport_caps.ud_odp_caps));
+ if (MLX5_CAP_GEN(dev, atomic)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
-out:
- kfree(out);
- return err;
+ if (MLX5_CAP_GEN(dev, roce)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+
+ if (MLX5_CAP_GEN(dev, nic_flow_table)) {
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_CUR);
+ if (err)
+ return err;
+ err = mlx5_core_get_caps(dev, MLX5_CAP_FLOW_TABLE,
+ HCA_CAP_OPMOD_GET_MAX);
+ if (err)
+ return err;
+ }
+ return 0;
}
-EXPORT_SYMBOL(mlx5_query_odp_caps);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev)
{
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 28425e5..2510fed 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -38,6 +38,7 @@
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
+#include <linux/interrupt.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
@@ -47,10 +48,6 @@
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"
-#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "3.0"
-#define DRIVER_RELDATE "January 2015"
-
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
@@ -208,24 +205,28 @@ static void release_bar(struct pci_dev *pdev)
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
- int num_eqs = 1 << dev->caps.gen.log_max_eq;
+ struct mlx5_priv *priv = &dev->priv;
+ struct mlx5_eq_table *table = &priv->eq_table;
+ int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
int nvec;
int i;
- nvec = dev->caps.gen.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
+ nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
+ MLX5_EQ_VEC_COMP_BASE;
nvec = min_t(int, nvec, num_eqs);
if (nvec <= MLX5_EQ_VEC_COMP_BASE)
return -ENOMEM;
- table->msix_arr = kzalloc(nvec * sizeof(*table->msix_arr), GFP_KERNEL);
- if (!table->msix_arr)
- return -ENOMEM;
+ priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);
+
+ priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
+ if (!priv->msix_arr || !priv->irq_info)
+ goto err_free_msix;
for (i = 0; i < nvec; i++)
- table->msix_arr[i].entry = i;
+ priv->msix_arr[i].entry = i;
- nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+ nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
MLX5_EQ_VEC_COMP_BASE + 1, nvec);
if (nvec < 0)
return nvec;
@@ -233,14 +234,20 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
+
+err_free_msix:
+ kfree(priv->irq_info);
+ kfree(priv->msix_arr);
+ return -ENOMEM;
}
static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
- struct mlx5_eq_table *table = &dev->priv.eq_table;
+ struct mlx5_priv *priv = &dev->priv;
pci_disable_msix(dev->pdev);
- kfree(table->msix_arr);
+ kfree(priv->irq_info);
+ kfree(priv->msix_arr);
}
struct mlx5_reg_host_endianess {
@@ -277,98 +284,28 @@ static u16 to_fw_pkey_sz(u32 size)
}
}
-/* selectively copy writable fields clearing any reserved area
- */
-static void copy_rw_fields(void *to, struct mlx5_caps *from)
+static u16 to_sw_pkey_sz(int pkey_sz)
{
- __be64 *flags_off = (__be64 *)MLX5_ADDR_OF(cmd_hca_cap, to, reserved_22);
- u64 v64;
-
- MLX5_SET(cmd_hca_cap, to, log_max_qp, from->gen.log_max_qp);
- MLX5_SET(cmd_hca_cap, to, log_max_ra_req_qp, from->gen.log_max_ra_req_qp);
- MLX5_SET(cmd_hca_cap, to, log_max_ra_res_qp, from->gen.log_max_ra_res_qp);
- MLX5_SET(cmd_hca_cap, to, pkey_table_size, from->gen.pkey_table_size);
- MLX5_SET(cmd_hca_cap, to, pkey_table_size, to_fw_pkey_sz(from->gen.pkey_table_size));
- MLX5_SET(cmd_hca_cap, to, log_uar_page_sz, PAGE_SHIFT - 12);
- v64 = from->gen.flags & MLX5_CAP_BITS_RW_MASK;
- *flags_off = cpu_to_be64(v64);
-}
-
-static u16 get_pkey_table_size(int pkey)
-{
- if (pkey > MLX5_MAX_LOG_PKEY_TABLE)
+ if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
return 0;
- return MLX5_MIN_PKEY_TABLE_SIZE << pkey;
+ return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
-static void fw2drv_caps(struct mlx5_caps *caps, void *out)
-{
- struct mlx5_general_caps *gen = &caps->gen;
-
- gen->max_srq_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_srq_sz);
- gen->max_wqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_qp_sz);
- gen->log_max_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_qp);
- gen->log_max_strq = MLX5_GET_PR(cmd_hca_cap, out, log_max_strq_sz);
- gen->log_max_srq = MLX5_GET_PR(cmd_hca_cap, out, log_max_srqs);
- gen->max_cqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_cq_sz);
- gen->log_max_cq = MLX5_GET_PR(cmd_hca_cap, out, log_max_cq);
- gen->max_eqes = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_max_eq_sz);
- gen->log_max_mkey = MLX5_GET_PR(cmd_hca_cap, out, log_max_mkey);
- gen->log_max_eq = MLX5_GET_PR(cmd_hca_cap, out, log_max_eq);
- gen->max_indirection = MLX5_GET_PR(cmd_hca_cap, out, max_indirection);
- gen->log_max_mrw_sz = MLX5_GET_PR(cmd_hca_cap, out, log_max_mrw_sz);
- gen->log_max_bsf_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_bsf_list_size);
- gen->log_max_klm_list_size = MLX5_GET_PR(cmd_hca_cap, out, log_max_klm_list_size);
- gen->log_max_ra_req_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_dc);
- gen->log_max_ra_res_dc = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_dc);
- gen->log_max_ra_req_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_req_qp);
- gen->log_max_ra_res_qp = MLX5_GET_PR(cmd_hca_cap, out, log_max_ra_res_qp);
- gen->max_qp_counters = MLX5_GET_PR(cmd_hca_cap, out, max_qp_cnt);
- gen->pkey_table_size = get_pkey_table_size(MLX5_GET_PR(cmd_hca_cap, out, pkey_table_size));
- gen->local_ca_ack_delay = MLX5_GET_PR(cmd_hca_cap, out, local_ca_ack_delay);
- gen->num_ports = MLX5_GET_PR(cmd_hca_cap, out, num_ports);
- gen->log_max_msg = MLX5_GET_PR(cmd_hca_cap, out, log_max_msg);
- gen->stat_rate_support = MLX5_GET_PR(cmd_hca_cap, out, stat_rate_support);
- gen->flags = be64_to_cpu(*(__be64 *)MLX5_ADDR_OF(cmd_hca_cap, out, reserved_22));
- pr_debug("flags = 0x%llx\n", gen->flags);
- gen->uar_sz = MLX5_GET_PR(cmd_hca_cap, out, uar_sz);
- gen->min_log_pg_sz = MLX5_GET_PR(cmd_hca_cap, out, log_pg_sz);
- gen->bf_reg_size = MLX5_GET_PR(cmd_hca_cap, out, bf);
- gen->bf_reg_size = 1 << MLX5_GET_PR(cmd_hca_cap, out, log_bf_reg_size);
- gen->max_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq);
- gen->max_rq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_rq);
- gen->max_dc_sq_desc_sz = MLX5_GET_PR(cmd_hca_cap, out, max_wqe_sz_sq_dc);
- gen->max_qp_mcg = MLX5_GET_PR(cmd_hca_cap, out, max_qp_mcg);
- gen->log_max_pd = MLX5_GET_PR(cmd_hca_cap, out, log_max_pd);
- gen->log_max_xrcd = MLX5_GET_PR(cmd_hca_cap, out, log_max_xrcd);
- gen->log_uar_page_sz = MLX5_GET_PR(cmd_hca_cap, out, log_uar_page_sz);
-}
-
-static const char *caps_opmod_str(u16 opmod)
-{
- switch (opmod) {
- case HCA_CAP_OPMOD_GET_MAX:
- return "GET_MAX";
- case HCA_CAP_OPMOD_GET_CUR:
- return "GET_CUR";
- default:
- return "Invalid";
- }
-}
-
-int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
- u16 opmod)
+int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
+ enum mlx5_cap_mode cap_mode)
{
u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
- void *out;
+ void *out, *hca_caps;
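+ /* op_mod packs both fields: bit 0 selects max (0) or current (1)
+ * values, the upper bits select the capability type
+ */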
+ u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
int err;
memset(in, 0, sizeof(in));
out = kzalloc(out_sz, GFP_KERNEL);
if (!out)
return -ENOMEM;
+
MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
@@ -377,12 +314,30 @@ int mlx5_core_get_caps(struct mlx5_core_dev *dev, struct mlx5_caps *caps,
err = mlx5_cmd_status_to_err_v2(out);
if (err) {
- mlx5_core_warn(dev, "query max hca cap failed, %d\n", err);
+ mlx5_core_warn(dev,
+ "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
+ cap_type, cap_mode, err);
goto query_ex;
}
- mlx5_core_dbg(dev, "%s\n", caps_opmod_str(opmod));
- fw2drv_caps(caps, MLX5_ADDR_OF(query_hca_cap_out, out, capability_struct));
+ hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
+
+ switch (cap_mode) {
+ case HCA_CAP_OPMOD_GET_MAX:
+ memcpy(dev->hca_caps_max[cap_type], hca_caps,
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ break;
+ case HCA_CAP_OPMOD_GET_CUR:
+ memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+ MLX5_UN_SZ_BYTES(hca_cap_union));
+ break;
+ default:
+ mlx5_core_warn(dev,
+ "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
+ cap_type, cap_mode);
+ err = -EINVAL;
+ break;
+ }
query_ex:
kfree(out);
return err;
@@ -409,49 +364,45 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
{
void *set_ctx = NULL;
struct mlx5_profile *prof = dev->profile;
- struct mlx5_caps *cur_caps = NULL;
- struct mlx5_caps *max_caps = NULL;
int err = -ENOMEM;
int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
+ void *set_hca_cap;
set_ctx = kzalloc(set_sz, GFP_KERNEL);
if (!set_ctx)
goto query_ex;
- max_caps = kzalloc(sizeof(*max_caps), GFP_KERNEL);
- if (!max_caps)
- goto query_ex;
-
- cur_caps = kzalloc(sizeof(*cur_caps), GFP_KERNEL);
- if (!cur_caps)
- goto query_ex;
-
- err = mlx5_core_get_caps(dev, max_caps, HCA_CAP_OPMOD_GET_MAX);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
if (err)
goto query_ex;
- err = mlx5_core_get_caps(dev, cur_caps, HCA_CAP_OPMOD_GET_CUR);
+ err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
if (err)
goto query_ex;
+ set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
+ capability);
+ memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+ MLX5_ST_SZ_BYTES(cmd_hca_cap));
+
+ mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
+ to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
+ 128);
/* we limit the size of the pkey table to 128 entries for now */
- cur_caps->gen.pkey_table_size = 128;
+ MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
+ to_fw_pkey_sz(128));
if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
- cur_caps->gen.log_max_qp = prof->log_max_qp;
+ MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
+ prof->log_max_qp);
- /* disable checksum */
- cur_caps->gen.flags &= ~MLX5_DEV_CAP_FLAG_CMDIF_CSUM;
+ /* disable cmdif checksum */
+ MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
- copy_rw_fields(MLX5_ADDR_OF(set_hca_cap_in, set_ctx, hca_capability_struct),
- cur_caps);
err = set_caps(dev, set_ctx, set_sz);
query_ex:
- kfree(cur_caps);
- kfree(max_caps);
kfree(set_ctx);
-
return err;
}
@@ -507,6 +458,74 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
return 0;
}
+static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct msix_entry *msix = priv->msix_arr;
+ int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+ int numa_node = dev_to_node(&mdev->pdev->dev);
+ int err;
+
+ if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
+ mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
+ return -ENOMEM;
+ }
+
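+ /* spread completion vectors over the CPUs local to the device's NUMA node */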
+ cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+ priv->irq_info[i].mask);
+
+ err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
+ if (err) {
+ mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
+ irq);
+ goto err_clear_mask;
+ }
+
+ return 0;
+
+err_clear_mask:
+ free_cpumask_var(priv->irq_info[i].mask);
+ return err;
+}
+
+static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
+{
+ struct mlx5_priv *priv = &mdev->priv;
+ struct msix_entry *msix = priv->msix_arr;
+ int irq = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
+
+ irq_set_affinity_hint(irq, NULL);
+ free_cpumask_var(priv->irq_info[i].mask);
+}
+
+static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int err;
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
+ err = mlx5_irq_set_affinity_hint(mdev, i);
+ if (err)
+ goto err_out;
+ }
+
+ return 0;
+
+err_out:
+ for (i--; i >= 0; i--)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+
+ return err;
+}
+
+static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
+{
+ int i;
+
+ for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
+ mlx5_irq_clear_affinity_hint(mdev, i);
+}
+
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, int *irqn)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
@@ -549,7 +568,7 @@ static void free_comp_eqs(struct mlx5_core_dev *dev)
static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
struct mlx5_eq_table *table = &dev->priv.eq_table;
- char name[MLX5_MAX_EQ_NAME];
+ char name[MLX5_MAX_IRQ_NAME];
struct mlx5_eq *eq;
int ncomp_vec;
int nent;
@@ -566,7 +585,7 @@ static int alloc_comp_eqs(struct mlx5_core_dev *dev)
goto clean;
}
- snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
+ snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
err = mlx5_create_map_eq(dev, eq,
i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
name, &dev->priv.uuari.uars[0]);
@@ -588,6 +607,61 @@ clean:
return err;
}
+#ifdef CONFIG_MLX5_CORE_EN
+static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
+{
+ u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
+ u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
+ u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
+ u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
+ int err;
+ u32 sup_issi;
+
+ memset(query_in, 0, sizeof(query_in));
+ memset(query_out, 0, sizeof(query_out));
+
+ MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
+
+ err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
+ query_out, sizeof(query_out));
+ if (err) {
+ if (((struct mlx5_outbox_hdr *)query_out)->status ==
+ MLX5_CMD_STAT_BAD_OP_ERR) {
+ pr_debug("Only ISSI 0 is supported\n");
+ return 0;
+ }
+
+ pr_err("failed to query ISSI\n");
+ return err;
+ }
+
+ sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);
+
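+ /* supported_issi_dw0 is a bitmask of supported ISSI (interface step
+ * sequence ID) levels; prefer ISSI 1 when the device supports it
+ */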
+ if (sup_issi & (1 << 1)) {
+ memset(set_in, 0, sizeof(set_in));
+ memset(set_out, 0, sizeof(set_out));
+
+ MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
+ MLX5_SET(set_issi_in, set_in, current_issi, 1);
+
+ err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
+ set_out, sizeof(set_out));
+ if (err) {
+ pr_err("failed to set ISSI=1\n");
+ return err;
+ }
+
+ dev->issi = 1;
+
+ return 0;
+ } else if (sup_issi & (1 << 0)) {
+ return 0;
+ }
+
+ return -ENOTSUPP;
+}
+#endif
+
static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
{
struct mlx5_priv *priv = &dev->priv;
@@ -650,6 +724,14 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_pagealloc_cleanup;
}
+#ifdef CONFIG_MLX5_CORE_EN
+ err = mlx5_core_set_issi(dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to set issi\n");
+ goto err_disable_hca;
+ }
+#endif
+
err = mlx5_satisfy_startup_pages(dev, 1);
if (err) {
dev_err(&pdev->dev, "failed to allocate boot pages\n");
@@ -688,7 +770,7 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
mlx5_start_health_poll(dev);
- err = mlx5_cmd_query_hca_cap(dev, &dev->caps);
+ err = mlx5_query_hca_caps(dev);
if (err) {
dev_err(&pdev->dev, "query hca failed\n");
goto err_stop_poll;
@@ -730,6 +812,12 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
goto err_stop_eqs;
}
+ err = mlx5_irq_set_affinity_hints(dev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
+ goto err_free_comp_eqs;
+ }
+
MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);
mlx5_init_cq_table(dev);
@@ -739,6 +827,9 @@ static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
return 0;
+err_free_comp_eqs:
+ free_comp_eqs(dev);
+
err_stop_eqs:
mlx5_stop_eqs(dev);
@@ -793,6 +884,7 @@ static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
mlx5_cleanup_srq_table(dev);
mlx5_cleanup_qp_table(dev);
mlx5_cleanup_cq_table(dev);
+ mlx5_irq_clear_affinity_hints(dev);
free_comp_eqs(dev);
mlx5_stop_eqs(dev);
mlx5_free_uuars(dev, &priv->uuari);
@@ -1048,6 +1140,10 @@ static int __init init(void)
if (err)
goto err_health;
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5e_init();
+#endif
+
return 0;
err_health:
@@ -1060,6 +1156,9 @@ err_debug:
static void __exit cleanup(void)
{
+#ifdef CONFIG_MLX5_CORE_EN
+ mlx5e_cleanup();
+#endif
pci_unregister_driver(&mlx5_core_driver);
mlx5_health_cleanup();
destroy_workqueue(mlx5_core_wq);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
index d79fd85..d5a0c2d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mcg.c
@@ -91,7 +91,7 @@ int mlx5_core_detach_mcg(struct mlx5_core_dev *dev, union ib_gid *mgid, u32 qpn)
memset(&in, 0, sizeof(in));
memset(&out, 0, sizeof(out));
- in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETACH_FROM_MCG);
+ in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DETTACH_FROM_MCG);
memcpy(in.gid, mgid, sizeof(*mgid));
in.qpn = cpu_to_be32(qpn);
err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index a051b90..6983c10 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -37,6 +37,10 @@
#include <linux/kernel.h>
#include <linux/sched.h>
+#define DRIVER_NAME "mlx5_core"
+#define DRIVER_VERSION "3.0-1"
+#define DRIVER_RELDATE "January 2015"
+
extern int mlx5_core_debug_mask;
#define mlx5_core_dbg(dev, format, ...) \
@@ -65,11 +69,20 @@ enum {
MLX5_CMD_TIME, /* print command execution time */
};
+static inline int mlx5_cmd_exec_check_status(struct mlx5_core_dev *dev, u32 *in,
+ int in_size, u32 *out,
+ int out_size)
+{
+ mlx5_cmd_exec(dev, in, in_size, out, out_size);
+ return mlx5_cmd_status_to_err((struct mlx5_outbox_hdr *)out);
+}
-int mlx5_cmd_query_hca_cap(struct mlx5_core_dev *dev,
- struct mlx5_caps *caps);
+int mlx5_query_hca_caps(struct mlx5_core_dev *dev);
int mlx5_cmd_query_adapter(struct mlx5_core_dev *dev);
int mlx5_cmd_init_hca(struct mlx5_core_dev *dev);
int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev);
+void mlx5e_init(void);
+void mlx5e_cleanup(void);
+
#endif /* __MLX5_CORE_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index 49e90f2..7d3d0f9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -102,3 +102,165 @@ int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
return err;
}
EXPORT_SYMBOL_GPL(mlx5_set_port_caps);
+
+int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys,
+ int ptys_size, int proto_mask)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+ MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), ptys,
+ ptys_size, MLX5_REG_PTYS, 0, 0);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_ptys);
+
+int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev,
+ u32 *proto_cap, int proto_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_cap = MLX5_GET(ptys_reg, out, eth_proto_capability);
+ else
+ *proto_cap = MLX5_GET(ptys_reg, out, ib_proto_capability);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_cap);
+
+int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev,
+ u32 *proto_admin, int proto_mask)
+{
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ err = mlx5_query_port_ptys(dev, out, sizeof(out), proto_mask);
+ if (err)
+ return err;
+
+ if (proto_mask == MLX5_PTYS_EN)
+ *proto_admin = MLX5_GET(ptys_reg, out, eth_proto_admin);
+ else
+ *proto_admin = MLX5_GET(ptys_reg, out, ib_proto_admin);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_proto_admin);
+
+int mlx5_set_port_proto(struct mlx5_core_dev *dev, u32 proto_admin,
+ int proto_mask)
+{
+ u32 in[MLX5_ST_SZ_DW(ptys_reg)];
+ u32 out[MLX5_ST_SZ_DW(ptys_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(ptys_reg, in, local_port, 1);
+ MLX5_SET(ptys_reg, in, proto_mask, proto_mask);
+ if (proto_mask == MLX5_PTYS_EN)
+ MLX5_SET(ptys_reg, in, eth_proto_admin, proto_admin);
+ else
+ MLX5_SET(ptys_reg, in, ib_proto_admin, proto_admin);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PTYS, 0, 1);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_proto);
+
+int mlx5_set_port_status(struct mlx5_core_dev *dev,
+ enum mlx5_port_status status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(paos_reg, in, admin_status, status);
+ MLX5_SET(paos_reg, in, ase, 1);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 1);
+}
+
+int mlx5_query_port_status(struct mlx5_core_dev *dev, u8 *status)
+{
+ u32 in[MLX5_ST_SZ_DW(paos_reg)];
+ u32 out[MLX5_ST_SZ_DW(paos_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PAOS, 0, 0);
+ if (err)
+ return err;
+
+ *status = MLX5_GET(paos_reg, out, oper_status);
+ return err;
+}
+
+static int mlx5_query_port_mtu(struct mlx5_core_dev *dev,
+ int *admin_mtu, int *max_mtu, int *oper_mtu)
+{
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmtu_reg, in, local_port, 1);
+
+ err = mlx5_core_access_reg(dev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_PMTU, 0, 0);
+ if (err)
+ return err;
+
+ if (max_mtu)
+ *max_mtu = MLX5_GET(pmtu_reg, out, max_mtu);
+ if (oper_mtu)
+ *oper_mtu = MLX5_GET(pmtu_reg, out, oper_mtu);
+ if (admin_mtu)
+ *admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
+
+ return 0;
+}
+
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu)
+{
+ u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
+ u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(pmtu_reg, in, admin_mtu, mtu);
+ MLX5_SET(pmtu_reg, in, local_port, 1);
+
+ return mlx5_core_access_reg(dev, in, sizeof(in), out, sizeof(out),
+ MLX5_REG_PMTU, 0, 1);
+}
+EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
+
+int mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu)
+{
+ return mlx5_query_port_mtu(dev, NULL, max_mtu, NULL);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
+
+int mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu)
+{
+ return mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu);
+}
+EXPORT_SYMBOL_GPL(mlx5_query_port_oper_mtu);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
new file mode 100644
index 0000000..3c555d7
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "mlx5_core.h"
+#include "transobj.h"
+
+int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_rq_out)];
+ int err;
+
+ MLX5_SET(create_rq_in, in, opcode, MLX5_CMD_OP_CREATE_RQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *rqn = MLX5_GET(create_rq_out, out, rqn);
+
+ return err;
+}
+
+int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_rq_out)];
+
+ MLX5_SET(modify_rq_in, in, rqn, rqn);
+ MLX5_SET(modify_rq_in, in, opcode, MLX5_CMD_OP_MODIFY_RQ);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_rq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_rq_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ);
+ MLX5_SET(destroy_rq_in, in, rqn, rqn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_sq_out)];
+ int err;
+
+ MLX5_SET(create_sq_in, in, opcode, MLX5_CMD_OP_CREATE_SQ);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *sqn = MLX5_GET(create_sq_out, out, sqn);
+
+ return err;
+}
+
+int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen)
+{
+ u32 out[MLX5_ST_SZ_DW(modify_sq_out)];
+
+ MLX5_SET(modify_sq_in, in, sqn, sqn);
+ MLX5_SET(modify_sq_in, in, opcode, MLX5_CMD_OP_MODIFY_SQ);
+
+ memset(out, 0, sizeof(out));
+ return mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+}
+
+void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_sq_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_sq_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_sq_in, in, opcode, MLX5_CMD_OP_DESTROY_SQ);
+ MLX5_SET(destroy_sq_in, in, sqn, sqn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tir_out)];
+ int err;
+
+ MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *tirn = MLX5_GET(create_tir_out, out, tirn);
+
+ return err;
+}
+
+void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tir_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_tir_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
+ MLX5_SET(destroy_tir_in, in, tirn, tirn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
+
+int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn)
+{
+ u32 out[MLX5_ST_SZ_DW(create_tis_out)];
+ int err;
+
+ MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
+
+ memset(out, 0, sizeof(out));
+ err = mlx5_cmd_exec_check_status(dev, in, inlen, out, sizeof(out));
+ if (!err)
+ *tisn = MLX5_GET(create_tis_out, out, tisn);
+
+ return err;
+}
+
+void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn)
+{
+ u32 in[MLX5_ST_SZ_DW(destroy_tis_in)];
+ u32 out[MLX5_ST_SZ_DW(destroy_tis_out)];
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
+ MLX5_SET(destroy_tis_in, in, tisn, tisn);
+
+ mlx5_cmd_exec_check_status(dev, in, sizeof(in), out, sizeof(out));
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.h b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
new file mode 100644
index 0000000..1bc898c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __TRANSOBJ_H__
+#define __TRANSOBJ_H__
+
+int mlx5_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqn);
+int mlx5_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+void mlx5_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *sqn);
+int mlx5_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+void mlx5_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tirn);
+void mlx5_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+int mlx5_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *tisn);
+void mlx5_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+
+#endif /* __TRANSOBJ_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/uar.c b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
index 5a89bb1..9ef8587 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/uar.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/uar.c
@@ -175,12 +175,13 @@ int mlx5_alloc_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
for (i = 0; i < tot_uuars; i++) {
bf = &uuari->bfs[i];
- bf->buf_size = dev->caps.gen.bf_reg_size / 2;
+ bf->buf_size = (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) / 2;
bf->uar = &uuari->uars[i / MLX5_BF_REGS_PER_PAGE];
bf->regreg = uuari->uars[i / MLX5_BF_REGS_PER_PAGE].map;
bf->reg = NULL; /* Add WC support */
- bf->offset = (i % MLX5_BF_REGS_PER_PAGE) * dev->caps.gen.bf_reg_size +
- MLX5_BF_OFFSET;
+ bf->offset = (i % MLX5_BF_REGS_PER_PAGE) *
+ (1 << MLX5_CAP_GEN(dev, log_bf_reg_size)) +
+ MLX5_BF_OFFSET;
bf->need_lock = need_uuar_lock(i);
spin_lock_init(&bf->lock);
spin_lock_init(&bf->lock32);
@@ -223,3 +224,40 @@ int mlx5_free_uuars(struct mlx5_core_dev *dev, struct mlx5_uuar_info *uuari)
return 0;
}
+
+int mlx5_alloc_map_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+ phys_addr_t pfn;
+ phys_addr_t uar_bar_start;
+ int err;
+
+ err = mlx5_cmd_alloc_uar(mdev, &uar->index);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_cmd_alloc_uar() failed, %d\n", err);
+ return err;
+ }
+
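+ /* each UAR index selects one PAGE_SIZE page within BAR 0 */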
+ uar_bar_start = pci_resource_start(mdev->pdev, 0);
+ pfn = (uar_bar_start >> PAGE_SHIFT) + uar->index;
+ uar->map = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
+ if (!uar->map) {
+ mlx5_core_warn(mdev, "ioremap() failed, %d\n", err);
+ err = -ENOMEM;
+ goto err_free_uar;
+ }
+
+ return 0;
+
+err_free_uar:
+ mlx5_cmd_free_uar(mdev, uar->index);
+
+ return err;
+}
+EXPORT_SYMBOL(mlx5_alloc_map_uar);
+
+void mlx5_unmap_free_uar(struct mlx5_core_dev *mdev, struct mlx5_uar *uar)
+{
+ iounmap(uar->map);
+ mlx5_cmd_free_uar(mdev, uar->index);
+}
+EXPORT_SYMBOL(mlx5_unmap_free_uar);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
new file mode 100644
index 0000000..ba374b9
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/export.h>
+#include <linux/etherdevice.h>
+#include <linux/mlx5/driver.h>
+#include "vport.h"
+#include "mlx5_core.h"
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod)
+{
+ u32 in[MLX5_ST_SZ_DW(query_vport_state_in)];
+ u32 out[MLX5_ST_SZ_DW(query_vport_state_out)];
+ int err;
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_vport_state_in, in, opcode,
+ MLX5_CMD_OP_QUERY_VPORT_STATE);
+ MLX5_SET(query_vport_state_in, in, op_mod, opmod);
+
+ err = mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out,
+ sizeof(out));
+ if (err)
+ mlx5_core_warn(mdev, "MLX5_CMD_OP_QUERY_VPORT_STATE failed\n");
+
+ return MLX5_GET(query_vport_state_out, out, state);
+}
+
+void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr)
+{
+ u32 in[MLX5_ST_SZ_DW(query_nic_vport_context_in)];
+ u32 *out;
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u8 *out_addr;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return;
+
+ out_addr = MLX5_ADDR_OF(query_nic_vport_context_out, out,
+ nic_vport_context.permanent_address);
+
+ memset(in, 0, sizeof(in));
+
+ MLX5_SET(query_nic_vport_context_in, in, opcode,
+ MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT);
+
+ memset(out, 0, outlen);
+ mlx5_cmd_exec_check_status(mdev, in, sizeof(in), out, outlen);
+
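+ /* the 6-byte MAC sits in the low bytes of the 8-byte permanent_address
+ * field, hence the offset of 2
+ */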
+ ether_addr_copy(addr, &out_addr[2]);
+
+ kvfree(out);
+}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.h b/drivers/net/ethernet/mellanox/mlx5/core/vport.h
new file mode 100644
index 0000000..c05ca2c
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_VPORT_H__
+#define __MLX5_VPORT_H__
+
+#include <linux/mlx5/driver.h>
+
+u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod);
+void mlx5_query_vport_mac_address(struct mlx5_core_dev *mdev, u8 *addr);
+
+#endif /* __MLX5_VPORT_H__ */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
new file mode 100644
index 0000000..8388411
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/mlx5/driver.h>
+#include "wq.h"
+#include "mlx5_core.h"
+
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
+{
+ return (u32)wq->sz_m1 + 1;
+}
+
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
+{
+ return wq->sz_m1 + 1;
+}
+
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq)
+{
+ return (u32)wq->sz_m1 + 1;
+}
+
+static u32 mlx5_wq_cyc_get_byte_size(struct mlx5_wq_cyc *wq)
+{
+ return mlx5_wq_cyc_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_cqwq_get_byte_size(struct mlx5_cqwq *wq)
+{
+ return mlx5_cqwq_get_size(wq) << wq->log_stride;
+}
+
+static u32 mlx5_wq_ll_get_byte_size(struct mlx5_wq_ll *wq)
+{
+ return mlx5_wq_ll_get_size(wq) << wq->log_stride;
+}
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_cyc *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int err;
+
+ wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_wq_cyc_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *cqc, struct mlx5_cqwq *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ int err;
+
+ wq->log_stride = 6 + MLX5_GET(cqc, cqc, cqe_sz);
+ wq->log_sz = MLX5_GET(cqc, cqc, log_cq_size);
+ wq->sz_m1 = (1 << wq->log_sz) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_cqwq_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_ll *wq,
+ struct mlx5_wq_ctrl *wq_ctrl)
+{
+ struct mlx5_wqe_srq_next_seg *next_seg;
+ int err;
+ int i;
+
+ wq->log_stride = MLX5_GET(wq, wqc, log_wq_stride);
+ wq->sz_m1 = (1 << MLX5_GET(wq, wqc, log_wq_sz)) - 1;
+
+ err = mlx5_db_alloc(mdev, &wq_ctrl->db);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_db_alloc() failed, %d\n", err);
+ return err;
+ }
+
+ err = mlx5_buf_alloc(mdev, mlx5_wq_ll_get_byte_size(wq), &wq_ctrl->buf);
+ if (err) {
+ mlx5_core_warn(mdev, "mlx5_buf_alloc() failed, %d\n", err);
+ goto err_db_free;
+ }
+
+ wq->buf = wq_ctrl->buf.direct.buf;
+ wq->db = wq_ctrl->db.db;
+
+ for (i = 0; i < wq->sz_m1; i++) {
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ next_seg->next_wqe_index = cpu_to_be16(i + 1);
+ }
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ wq->tail_next = &next_seg->next_wqe_index;
+
+ wq_ctrl->mdev = mdev;
+
+ return 0;
+
+err_db_free:
+ mlx5_db_free(mdev, &wq_ctrl->db);
+
+ return err;
+}
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
+{
+ mlx5_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
+ mlx5_db_free(wq_ctrl->mdev, &wq_ctrl->db);
+}
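
The three WQ flavors above all size their rings as powers of two, so an entry index wraps with the sz_m1 mask and a stride multiply reduces to a shift by log_stride. A minimal user-space sketch of the same arithmetic (standalone illustration, not driver code; struct and names are simplified):

    #include <stdint.h>
    #include <stdio.h>

    /* size = sz_m1 + 1 entries, each (1 << log_stride) bytes wide */
    struct wq_cyc {
        uint16_t sz_m1;      /* entry count minus one, used as wrap mask */
        uint8_t  log_stride; /* log2 of the per-entry stride in bytes */
    };

    static uint16_t wq_ctr2ix(const struct wq_cyc *wq, uint16_t ctr)
    {
        return ctr & wq->sz_m1; /* wrap without a modulo */
    }

    static uint32_t wq_byte_size(const struct wq_cyc *wq)
    {
        return (uint32_t)(wq->sz_m1 + 1) << wq->log_stride;
    }

    int main(void)
    {
        struct wq_cyc wq = { .sz_m1 = (1 << 4) - 1, .log_stride = 6 };

        /* counter 17 on a 16-entry ring lands in slot 1; ring is 1024 bytes */
        printf("ix=%u bytes=%u\n", (unsigned)wq_ctr2ix(&wq, 17),
               (unsigned)wq_byte_size(&wq));
        return 0;
    }

The CQ variant derives log_stride as 6 + cqe_sz because a CQE is 64 bytes when cqe_sz is 0 and 128 bytes when it is 1.
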
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
new file mode 100644
index 0000000..e0ddd69
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __MLX5_WQ_H__
+#define __MLX5_WQ_H__
+
+#include <linux/mlx5/mlx5_ifc.h>
+
+struct mlx5_wq_param {
+ int linear;
+ int numa;
+};
+
+struct mlx5_wq_ctrl {
+ struct mlx5_core_dev *mdev;
+ struct mlx5_buf buf;
+ struct mlx5_db db;
+};
+
+struct mlx5_wq_cyc {
+ void *buf;
+ __be32 *db;
+ u16 sz_m1;
+ u8 log_stride;
+};
+
+struct mlx5_cqwq {
+ void *buf;
+ __be32 *db;
+ u32 sz_m1;
+ u32 cc; /* consumer counter */
+ u8 log_sz;
+ u8 log_stride;
+};
+
+struct mlx5_wq_ll {
+ void *buf;
+ __be32 *db;
+ __be16 *tail_next;
+ u16 sz_m1;
+ u16 head;
+ u16 wqe_ctr;
+ u16 cur_sz;
+ u8 log_stride;
+};
+
+int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_cyc *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
+
+int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *cqc, struct mlx5_cqwq *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq);
+
+int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
+ void *wqc, struct mlx5_wq_ll *wq,
+ struct mlx5_wq_ctrl *wq_ctrl);
+u32 mlx5_wq_ll_get_size(struct mlx5_wq_ll *wq);
+
+void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
+
+static inline u16 mlx5_wq_cyc_ctr2ix(struct mlx5_wq_cyc *wq, u16 ctr)
+{
+ return ctr & wq->sz_m1;
+}
+
+static inline void *mlx5_wq_cyc_get_wqe(struct mlx5_wq_cyc *wq, u16 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline int mlx5_wq_cyc_cc_bigger(u16 cc1, u16 cc2)
+{
+ int equal = (cc1 == cc2);
+ int smaller = 0x8000 & (cc1 - cc2);
+
+ return !equal && !smaller;
+}
+
+static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq)
+{
+ return wq->cc & wq->sz_m1;
+}
+
+static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline u32 mlx5_cqwq_get_wrap_cnt(struct mlx5_cqwq *wq)
+{
+ return wq->cc >> wq->log_sz;
+}
+
+static inline void mlx5_cqwq_pop(struct mlx5_cqwq *wq)
+{
+ wq->cc++;
+}
+
+static inline void mlx5_cqwq_update_db_record(struct mlx5_cqwq *wq)
+{
+ *wq->db = cpu_to_be32(wq->cc & 0xffffff);
+}
+
+static inline int mlx5_wq_ll_is_full(struct mlx5_wq_ll *wq)
+{
+ return wq->cur_sz == wq->sz_m1;
+}
+
+static inline int mlx5_wq_ll_is_empty(struct mlx5_wq_ll *wq)
+{
+ return !wq->cur_sz;
+}
+
+static inline void *mlx5_wq_ll_get_wqe(struct mlx5_wq_ll *wq, u16 ix)
+{
+ return wq->buf + (ix << wq->log_stride);
+}
+
+static inline void mlx5_wq_ll_push(struct mlx5_wq_ll *wq, u16 head_next)
+{
+ wq->head = head_next;
+ wq->wqe_ctr++;
+ wq->cur_sz++;
+}
+
+static inline void mlx5_wq_ll_pop(struct mlx5_wq_ll *wq, __be16 ix,
+ __be16 *next_tail_next)
+{
+ *wq->tail_next = ix;
+ wq->tail_next = next_tail_next;
+ wq->cur_sz--;
+}
+
+static inline void mlx5_wq_ll_update_db_record(struct mlx5_wq_ll *wq)
+{
+ *wq->db = cpu_to_be32(wq->wqe_ctr);
+}
+
+#endif /* __MLX5_WQ_H__ */
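
mlx5_wq_cyc_cc_bigger() above compares free-running 16-bit consumer counters the way TCP compares sequence numbers: cc1 counts as bigger when the counters differ and bit 15 of the 16-bit difference is clear, which stays correct across wrap-around. A standalone sketch of the same test (illustrative, not driver code):

    #include <stdint.h>
    #include <assert.h>

    /* cc1 is "bigger" when the counters differ and the 16-bit
     * difference has its sign bit (0x8000) clear. */
    static int cc_bigger(uint16_t cc1, uint16_t cc2)
    {
        int equal   = (cc1 == cc2);
        int smaller = 0x8000 & (uint16_t)(cc1 - cc2);

        return !equal && !smaller;
    }

    int main(void)
    {
        assert(cc_bigger(5, 3));       /* plain case */
        assert(cc_bigger(2, 0xfffe));  /* 2 is "after" 0xfffe across the wrap */
        assert(!cc_bigger(7, 7));
        return 0;
    }
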
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 6f332eb..48d2aec 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -6664,7 +6664,7 @@ static void mib_read_work(struct work_struct *work)
wake_up_interruptible(
&hw_priv->counter[i].counter);
}
- } else if (jiffies >= hw_priv->counter[i].time) {
+ } else if (time_after_eq(jiffies, hw_priv->counter[i].time)) {
/* Only read MIB counters when the port is connected. */
if (media_connected == mib->state)
hw_priv->counter[i].read = 1;
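
The ksz884x fix above matters because jiffies wraps: a raw jiffies >= deadline comparison inverts its answer once the counter rolls over, while time_after_eq() compares the signed difference and keeps working. A user-space model of the idea (the real macros live in <linux/jiffies.h>; this sketch only mirrors their arithmetic):

    #include <stdio.h>

    typedef unsigned long jiffies_t;

    /* Wrap-safe "a is at or past b": judge by the sign of the difference. */
    #define my_time_after_eq(a, b)  ((long)((a) - (b)) >= 0)

    int main(void)
    {
        jiffies_t deadline = (jiffies_t)-10; /* just before the wrap */
        jiffies_t now = 5;                   /* just after the wrap */

        /* raw >= gets this wrong; the signed-difference form does not */
        printf("raw: %d  wrap-safe: %d\n",
               now >= deadline, my_time_after_eq(now, deadline));
        return 0;
    }
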
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 1e0f72b..c281117 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -5308,7 +5308,8 @@ static int do_s2io_prog_unicast(struct net_device *dev, u8 *addr)
/**
* s2io_ethtool_sset - Sets different link parameters.
- * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
* @info: pointer to the structure with parameters given by ethtool to set
* link information.
* Description:
@@ -5793,7 +5794,8 @@ static void s2io_vpd_read(struct s2io_nic *nic)
/**
* s2io_ethtool_geeprom - reads the value stored in the Eeprom.
- * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
+ * @sp : private member of the device structure, which is a pointer to the
+ * s2io_nic structure.
* @eeprom : pointer to the user level structure provided by ethtool,
* containing all relevant information.
* @data_buf : user defined value to be written into Eeprom.
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index e0c31e3..6409a06 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -3025,9 +3025,9 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
u8 dw, rows, cols, banks, ranks;
u32 val;
- if (size != sizeof(struct netxen_dimm_cfg)) {
+ if (size < attr->size) {
netdev_err(netdev, "Invalid size\n");
- return -1;
+ return -EINVAL;
}
memset(&dimm, 0, sizeof(struct netxen_dimm_cfg));
@@ -3137,7 +3137,7 @@ out:
static struct bin_attribute bin_attr_dimm = {
.attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) },
- .size = 0,
+ .size = sizeof(struct netxen_dimm_cfg),
.read = netxen_sysfs_read_dimm,
};
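
The netxen change above does two related things: the handler rejects undersized buffers with -EINVAL instead of a bare -1 (which user space would otherwise see as EPERM), and the bin_attribute advertises its real payload size instead of 0 so sysfs can bound accesses up front. The validation contract, reduced to a standalone sketch (hypothetical handler, not the driver's):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/types.h>

    struct dimm_cfg { unsigned int presence, mem_type, size_mb; };

    /* Reject undersized buffers with a real errno instead of a
     * private -1 that callers cannot interpret. */
    static ssize_t read_dimm(void *buf, size_t size)
    {
        struct dimm_cfg cfg = { .presence = 1, .mem_type = 2, .size_mb = 4096 };

        if (size < sizeof(cfg))
            return -EINVAL; /* was a bare -1 in the old code */

        memcpy(buf, &cfg, sizeof(cfg));
        return sizeof(cfg);
    }

    int main(void)
    {
        char small[4], big[64];

        printf("%zd %zd\n", read_dimm(small, sizeof(small)),
               read_dimm(big, sizeof(big)));
        return 0;
    }
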
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f221126..055f376 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1326,9 +1326,6 @@ struct qlcnic_eswitch {
};
-/* Return codes for Error handling */
-#define QL_STATUS_INVALID_PARAM -1
-
#define MAX_BW 100 /* % of link speed */
#define MIN_BW 1 /* % of link speed */
#define MAX_VLAN_ID 4095
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 367f397..2f6cc42 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1031,7 +1031,7 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
pfn = pci_info[i].id;
if (pfn >= ahw->max_vnic_func) {
- ret = QL_STATUS_INVALID_PARAM;
+ ret = -EINVAL;
dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
__func__, pfn, ahw->max_vnic_func);
goto err_eswitch;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 59a721f..05c28f2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -24,8 +24,6 @@
#include <linux/hwmon-sysfs.h>
#endif
-#define QLC_STATUS_UNSUPPORTED_CMD -2
-
int qlcnicvf_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable)
{
return -EOPNOTSUPP;
@@ -166,7 +164,7 @@ static int qlcnic_82xx_store_beacon(struct qlcnic_adapter *adapter,
u8 b_state, b_rate;
if (len != sizeof(u16))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memcpy(&beacon, buf, sizeof(u16));
err = qlcnic_validate_beacon(adapter, beacon, &b_state, &b_rate);
@@ -383,17 +381,17 @@ static int validate_pm_config(struct qlcnic_adapter *adapter,
dest_pci_func = pm_cfg[i].dest_npar;
src_index = qlcnic_is_valid_nic_func(adapter, src_pci_func);
if (src_index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
dest_index = qlcnic_is_valid_nic_func(adapter, dest_pci_func);
if (dest_index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
s_esw_id = adapter->npars[src_index].phy_port;
d_esw_id = adapter->npars[dest_index].phy_port;
if (s_esw_id != d_esw_id)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
return 0;
@@ -414,7 +412,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
count = size / sizeof(struct qlcnic_pm_func_cfg);
rem = size % sizeof(struct qlcnic_pm_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
@@ -427,7 +425,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
action = !!pm_cfg[i].action;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
id = adapter->npars[index].phy_port;
ret = qlcnic_config_port_mirroring(adapter, id,
@@ -440,7 +438,7 @@ static ssize_t qlcnic_sysfs_write_pm_config(struct file *filp,
pci_func = pm_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
id = adapter->npars[index].phy_port;
adapter->npars[index].enable_pm = !!pm_cfg[i].action;
adapter->npars[index].dest_npar = id;
@@ -499,11 +497,11 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
if (pci_func >= ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -517,25 +515,25 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
if (ret != QLCNIC_NON_PRIV_FUNC) {
if (esw_cfg[i].mac_anti_spoof != 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (esw_cfg[i].mac_override != 1)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (esw_cfg[i].promisc_mode != 1)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
break;
case QLCNIC_ADD_VLAN:
if (!IS_VALID_VLAN(esw_cfg[i].vlan_id))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (!esw_cfg[i].op_type)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
break;
case QLCNIC_DEL_VLAN:
if (!esw_cfg[i].op_type)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
break;
default:
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
}
@@ -559,7 +557,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
count = size / sizeof(struct qlcnic_esw_func_cfg);
rem = size % sizeof(struct qlcnic_esw_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
@@ -570,7 +568,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
for (i = 0; i < count; i++) {
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
if (qlcnic_config_switch_port(adapter, &esw_cfg[i]))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (adapter->ahw->pci_func != esw_cfg[i].pci_func)
continue;
@@ -604,7 +602,7 @@ static ssize_t qlcnic_sysfs_write_esw_config(struct file *file,
pci_func = esw_cfg[i].pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
npar = &adapter->npars[index];
switch (esw_cfg[i].op_mode) {
case QLCNIC_PORT_DEFAULTS:
@@ -654,7 +652,7 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
esw_cfg[pci_func].pci_func = pci_func;
if (qlcnic_get_eswitch_port_config(adapter, &esw_cfg[pci_func]))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
return size;
@@ -669,11 +667,11 @@ static int validate_npar_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = np_cfg[i].pci_func;
if (qlcnic_is_valid_nic_func(adapter, pci_func) < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (!IS_VALID_BW(np_cfg[i].min_bw) ||
!IS_VALID_BW(np_cfg[i].max_bw))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
}
return 0;
}
@@ -694,7 +692,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
count = size / sizeof(struct qlcnic_npar_func_cfg);
rem = size % sizeof(struct qlcnic_npar_func_cfg);
if (rem)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
qlcnic_swap32_buffer((u32 *)buf, size / sizeof(u32));
np_cfg = (struct qlcnic_npar_func_cfg *)buf;
@@ -717,7 +715,7 @@ static ssize_t qlcnic_sysfs_write_npar_config(struct file *file,
return ret;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
adapter->npars[index].min_bw = nic_info.min_tx_bw;
adapter->npars[index].max_bw = nic_info.max_tx_bw;
}
@@ -784,13 +782,13 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (size != sizeof(struct qlcnic_esw_statistics))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (offset >= adapter->ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memset(&port_stats, 0, size);
ret = qlcnic_get_port_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -819,13 +817,13 @@ static ssize_t qlcnic_sysfs_get_esw_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (size != sizeof(struct qlcnic_esw_statistics))
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
memset(&esw_stats, 0, size);
ret = qlcnic_get_eswitch_stats(adapter, offset, QLCNIC_QUERY_RX_COUNTER,
@@ -853,10 +851,10 @@ static ssize_t qlcnic_sysfs_clear_esw_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (offset >= QLCNIC_NIU_MAX_XG_PORTS)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_ESWITCH, offset,
QLCNIC_QUERY_RX_COUNTER);
@@ -883,10 +881,10 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
int ret;
if (qlcnic_83xx_check(adapter))
- return QLC_STATUS_UNSUPPORTED_CMD;
+ return -EOPNOTSUPP;
if (offset >= adapter->ahw->max_vnic_func)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
QLCNIC_QUERY_RX_COUNTER);
@@ -953,9 +951,7 @@ static ssize_t qlcnic_83xx_sysfs_flash_read_handler(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
if (!size)
- return QL_STATUS_INVALID_PARAM;
- if (!buf)
- return QL_STATUS_INVALID_PARAM;
+ return -EINVAL;
count = size / sizeof(u32);
@@ -1132,9 +1128,6 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- if (!buf)
- return QL_STATUS_INVALID_PARAM;
-
ret = kstrtoul(buf, 16, &data);
switch (data) {
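
All the qlcnic hunks above make one sweep: the driver-private QL_STATUS_INVALID_PARAM (-1) and QLC_STATUS_UNSUPPORTED_CMD (-2) become -EINVAL and -EOPNOTSUPP. The private values were actively misleading once they escaped the driver, since -1 and -2 collide with -EPERM and -ENOENT in the standard errno space. A standalone sketch of why standard codes matter at the boundary (hypothetical helper, not driver code):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    /* A standard negative errno maps to a meaningful message for the
     * caller; a private -2 would decode as "No such file or directory". */
    static int do_op(int supported)
    {
        return supported ? 0 : -EOPNOTSUPP;
    }

    int main(void)
    {
        int ret = do_op(0);

        if (ret < 0)
            printf("op failed: %s\n", strerror(-ret));
        return 0;
    }
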
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 701ffc2..819289e 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -295,7 +295,7 @@ static bool rocker_vlan_id_is_internal(__be16 vlan_id)
return (_vlan_id >= start && _vlan_id <= end);
}
-static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
+static __be16 rocker_port_vid_to_vlan(const struct rocker_port *rocker_port,
u16 vid, bool *pop_vlan)
{
__be16 vlan_id;
@@ -312,7 +312,7 @@ static __be16 rocker_port_vid_to_vlan(struct rocker_port *rocker_port,
return vlan_id;
}
-static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
+static u16 rocker_port_vlan_to_vid(const struct rocker_port *rocker_port,
__be16 vlan_id)
{
if (rocker_vlan_id_is_internal(vlan_id))
@@ -321,7 +321,7 @@ static u16 rocker_port_vlan_to_vid(struct rocker_port *rocker_port,
return ntohs(vlan_id);
}
-static bool rocker_port_is_bridged(struct rocker_port *rocker_port)
+static bool rocker_port_is_bridged(const struct rocker_port *rocker_port)
{
return !!rocker_port->bridge_dev;
}
@@ -377,8 +377,7 @@ static void *rocker_port_kcalloc(struct rocker_port *rocker_port,
return __rocker_port_mem_alloc(rocker_port, trans, n * size);
}
-static void rocker_port_kfree(struct rocker_port *rocker_port,
- enum switchdev_trans trans, const void *mem)
+static void rocker_port_kfree(enum switchdev_trans trans, const void *mem)
{
struct list_head *elem;
@@ -423,11 +422,10 @@ static struct rocker_wait *rocker_wait_create(struct rocker_port *rocker_port,
return wait;
}
-static void rocker_wait_destroy(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+static void rocker_wait_destroy(enum switchdev_trans trans,
struct rocker_wait *wait)
{
- rocker_port_kfree(rocker_port, trans, wait);
+ rocker_port_kfree(trans, wait);
}
static bool rocker_wait_event_timeout(struct rocker_wait *wait,
@@ -445,18 +443,18 @@ static void rocker_wait_wake_up(struct rocker_wait *wait)
wake_up(&wait->wait);
}
-static u32 rocker_msix_vector(struct rocker *rocker, unsigned int vector)
+static u32 rocker_msix_vector(const struct rocker *rocker, unsigned int vector)
{
return rocker->msix_entries[vector].vector;
}
-static u32 rocker_msix_tx_vector(struct rocker_port *rocker_port)
+static u32 rocker_msix_tx_vector(const struct rocker_port *rocker_port)
{
return rocker_msix_vector(rocker_port->rocker,
ROCKER_MSIX_VEC_TX(rocker_port->port_number));
}
-static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
+static u32 rocker_msix_rx_vector(const struct rocker_port *rocker_port)
{
return rocker_msix_vector(rocker_port->rocker,
ROCKER_MSIX_VEC_RX(rocker_port->port_number));
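
The rocker changes in this file are mostly a const-correctness sweep: every helper that only reads a rocker or rocker_port now takes a pointer-to-const, so an accidental store through an accessor becomes a compile error rather than a latent bug. A trivial standalone illustration (simplified types, not the driver's):

    #include <stdio.h>

    struct port { unsigned int number; void *bridge_dev; };

    /* Read-only accessor takes pointer-to-const; a stray
     * "p->number = 0" inside it would now fail to compile. */
    static int port_is_bridged(const struct port *p)
    {
        return p->bridge_dev != NULL;
    }

    int main(void)
    {
        struct port p = { .number = 1, .bridge_dev = NULL };

        printf("bridged: %d\n", port_is_bridged(&p));
        return 0;
    }
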
@@ -475,9 +473,9 @@ static u32 rocker_msix_rx_vector(struct rocker_port *rocker_port)
* HW basic testing functions
*****************************/
-static int rocker_reg_test(struct rocker *rocker)
+static int rocker_reg_test(const struct rocker *rocker)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
u64 test_reg;
u64 rnd;
@@ -505,12 +503,12 @@ static int rocker_reg_test(struct rocker *rocker)
return 0;
}
-static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
- u32 test_type, dma_addr_t dma_handle,
- unsigned char *buf, unsigned char *expect,
- size_t size)
+static int rocker_dma_test_one(const struct rocker *rocker,
+ struct rocker_wait *wait, u32 test_type,
+ dma_addr_t dma_handle, const unsigned char *buf,
+ const unsigned char *expect, size_t size)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
int i;
rocker_wait_reset(wait);
@@ -534,7 +532,7 @@ static int rocker_dma_test_one(struct rocker *rocker, struct rocker_wait *wait,
#define ROCKER_TEST_DMA_BUF_SIZE (PAGE_SIZE * 4)
#define ROCKER_TEST_DMA_FILL_PATTERN 0x96
-static int rocker_dma_test_offset(struct rocker *rocker,
+static int rocker_dma_test_offset(const struct rocker *rocker,
struct rocker_wait *wait, int offset)
{
struct pci_dev *pdev = rocker->pdev;
@@ -594,7 +592,8 @@ free_alloc:
return err;
}
-static int rocker_dma_test(struct rocker *rocker, struct rocker_wait *wait)
+static int rocker_dma_test(const struct rocker *rocker,
+ struct rocker_wait *wait)
{
int i;
int err;
@@ -616,9 +615,9 @@ static irqreturn_t rocker_test_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int rocker_basic_hw_test(struct rocker *rocker)
+static int rocker_basic_hw_test(const struct rocker *rocker)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
struct rocker_wait wait;
int err;
@@ -751,7 +750,7 @@ static u64 rocker_tlv_get_u64(const struct rocker_tlv *tlv)
return *(u64 *) rocker_tlv_data(tlv);
}
-static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
+static void rocker_tlv_parse(const struct rocker_tlv **tb, int maxtype,
const char *buf, int buf_len)
{
const struct rocker_tlv *tlv;
@@ -764,19 +763,19 @@ static void rocker_tlv_parse(struct rocker_tlv **tb, int maxtype,
u32 type = rocker_tlv_type(tlv);
if (type > 0 && type <= maxtype)
- tb[type] = (struct rocker_tlv *) tlv;
+ tb[type] = tlv;
}
}
-static void rocker_tlv_parse_nested(struct rocker_tlv **tb, int maxtype,
+static void rocker_tlv_parse_nested(const struct rocker_tlv **tb, int maxtype,
const struct rocker_tlv *tlv)
{
rocker_tlv_parse(tb, maxtype, rocker_tlv_data(tlv),
rocker_tlv_len(tlv));
}
-static void rocker_tlv_parse_desc(struct rocker_tlv **tb, int maxtype,
- struct rocker_desc_info *desc_info)
+static void rocker_tlv_parse_desc(const struct rocker_tlv **tb, int maxtype,
+ const struct rocker_desc_info *desc_info)
{
rocker_tlv_parse(tb, maxtype, desc_info->data,
desc_info->desc->tlv_size);
@@ -861,9 +860,9 @@ static void rocker_tlv_nest_end(struct rocker_desc_info *desc_info,
}
static void rocker_tlv_nest_cancel(struct rocker_desc_info *desc_info,
- struct rocker_tlv *start)
+ const struct rocker_tlv *start)
{
- desc_info->tlv_size = (char *) start - desc_info->data;
+ desc_info->tlv_size = (const char *) start - desc_info->data;
}
/******************************************
@@ -875,7 +874,7 @@ static u32 __pos_inc(u32 pos, size_t limit)
return ++pos == limit ? 0 : pos;
}
-static int rocker_desc_err(struct rocker_desc_info *desc_info)
+static int rocker_desc_err(const struct rocker_desc_info *desc_info)
{
int err = desc_info->desc->comp_err & ~ROCKER_DMA_DESC_COMP_ERR_GEN;
@@ -903,31 +902,31 @@ static int rocker_desc_err(struct rocker_desc_info *desc_info)
return -EINVAL;
}
-static void rocker_desc_gen_clear(struct rocker_desc_info *desc_info)
+static void rocker_desc_gen_clear(const struct rocker_desc_info *desc_info)
{
desc_info->desc->comp_err &= ~ROCKER_DMA_DESC_COMP_ERR_GEN;
}
-static bool rocker_desc_gen(struct rocker_desc_info *desc_info)
+static bool rocker_desc_gen(const struct rocker_desc_info *desc_info)
{
u32 comp_err = desc_info->desc->comp_err;
return comp_err & ROCKER_DMA_DESC_COMP_ERR_GEN ? true : false;
}
-static void *rocker_desc_cookie_ptr_get(struct rocker_desc_info *desc_info)
+static void *rocker_desc_cookie_ptr_get(const struct rocker_desc_info *desc_info)
{
return (void *)(uintptr_t)desc_info->desc->cookie;
}
-static void rocker_desc_cookie_ptr_set(struct rocker_desc_info *desc_info,
+static void rocker_desc_cookie_ptr_set(const struct rocker_desc_info *desc_info,
void *ptr)
{
desc_info->desc->cookie = (uintptr_t) ptr;
}
static struct rocker_desc_info *
-rocker_desc_head_get(struct rocker_dma_ring_info *info)
+rocker_desc_head_get(const struct rocker_dma_ring_info *info)
{
static struct rocker_desc_info *desc_info;
u32 head = __pos_inc(info->head, info->size);
@@ -939,15 +938,15 @@ rocker_desc_head_get(struct rocker_dma_ring_info *info)
return desc_info;
}
-static void rocker_desc_commit(struct rocker_desc_info *desc_info)
+static void rocker_desc_commit(const struct rocker_desc_info *desc_info)
{
desc_info->desc->buf_size = desc_info->data_size;
desc_info->desc->tlv_size = desc_info->tlv_size;
}
-static void rocker_desc_head_set(struct rocker *rocker,
+static void rocker_desc_head_set(const struct rocker *rocker,
struct rocker_dma_ring_info *info,
- struct rocker_desc_info *desc_info)
+ const struct rocker_desc_info *desc_info)
{
u32 head = __pos_inc(info->head, info->size);
@@ -972,8 +971,8 @@ rocker_desc_tail_get(struct rocker_dma_ring_info *info)
return desc_info;
}
-static void rocker_dma_ring_credits_set(struct rocker *rocker,
- struct rocker_dma_ring_info *info,
+static void rocker_dma_ring_credits_set(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
u32 credits)
{
if (credits)
@@ -986,7 +985,7 @@ static unsigned long rocker_dma_ring_size_fix(size_t size)
min(roundup_pow_of_two(size), ROCKER_DMA_SIZE_MAX));
}
-static int rocker_dma_ring_create(struct rocker *rocker,
+static int rocker_dma_ring_create(const struct rocker *rocker,
unsigned int type,
size_t size,
struct rocker_dma_ring_info *info)
@@ -1022,8 +1021,8 @@ static int rocker_dma_ring_create(struct rocker *rocker,
return 0;
}
-static void rocker_dma_ring_destroy(struct rocker *rocker,
- struct rocker_dma_ring_info *info)
+static void rocker_dma_ring_destroy(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info)
{
rocker_write64(rocker, DMA_DESC_ADDR(info->type), 0);
@@ -1033,7 +1032,7 @@ static void rocker_dma_ring_destroy(struct rocker *rocker,
kfree(info->desc_info);
}
-static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
+static void rocker_dma_ring_pass_to_producer(const struct rocker *rocker,
struct rocker_dma_ring_info *info)
{
int i;
@@ -1048,8 +1047,8 @@ static void rocker_dma_ring_pass_to_producer(struct rocker *rocker,
rocker_desc_commit(&info->desc_info[i]);
}
-static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
- struct rocker_dma_ring_info *info,
+static int rocker_dma_ring_bufs_alloc(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
int direction, size_t buf_size)
{
struct pci_dev *pdev = rocker->pdev;
@@ -1086,7 +1085,7 @@ static int rocker_dma_ring_bufs_alloc(struct rocker *rocker,
rollback:
for (i--; i >= 0; i--) {
- struct rocker_desc_info *desc_info = &info->desc_info[i];
+ const struct rocker_desc_info *desc_info = &info->desc_info[i];
pci_unmap_single(pdev, dma_unmap_addr(desc_info, mapaddr),
desc_info->data_size, direction);
@@ -1095,15 +1094,15 @@ rollback:
return err;
}
-static void rocker_dma_ring_bufs_free(struct rocker *rocker,
- struct rocker_dma_ring_info *info,
+static void rocker_dma_ring_bufs_free(const struct rocker *rocker,
+ const struct rocker_dma_ring_info *info,
int direction)
{
struct pci_dev *pdev = rocker->pdev;
int i;
for (i = 0; i < info->size; i++) {
- struct rocker_desc_info *desc_info = &info->desc_info[i];
+ const struct rocker_desc_info *desc_info = &info->desc_info[i];
struct rocker_desc *desc = &info->desc[i];
desc->buf_addr = 0;
@@ -1116,7 +1115,7 @@ static void rocker_dma_ring_bufs_free(struct rocker *rocker,
static int rocker_dma_rings_init(struct rocker *rocker)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
int err;
err = rocker_dma_ring_create(rocker, ROCKER_DMA_CMD,
@@ -1173,11 +1172,11 @@ static void rocker_dma_rings_fini(struct rocker *rocker)
rocker_dma_ring_destroy(rocker, &rocker->cmd_ring);
}
-static int rocker_dma_rx_ring_skb_map(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_dma_rx_ring_skb_map(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
struct sk_buff *skb, size_t buf_len)
{
+ const struct rocker *rocker = rocker_port->rocker;
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
@@ -1197,13 +1196,12 @@ tlv_put_failure:
return -EMSGSIZE;
}
-static size_t rocker_port_rx_buf_len(struct rocker_port *rocker_port)
+static size_t rocker_port_rx_buf_len(const struct rocker_port *rocker_port)
{
return rocker_port->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}
-static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_dma_rx_ring_skb_alloc(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info)
{
struct net_device *dev = rocker_port->dev;
@@ -1220,8 +1218,7 @@ static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
skb = netdev_alloc_skb_ip_align(dev, buf_len);
if (!skb)
return -ENOMEM;
- err = rocker_dma_rx_ring_skb_map(rocker, rocker_port, desc_info,
- skb, buf_len);
+ err = rocker_dma_rx_ring_skb_map(rocker_port, desc_info, skb, buf_len);
if (err) {
dev_kfree_skb_any(skb);
return err;
@@ -1230,8 +1227,8 @@ static int rocker_dma_rx_ring_skb_alloc(struct rocker *rocker,
return 0;
}
-static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
- struct rocker_tlv **attrs)
+static void rocker_dma_rx_ring_skb_unmap(const struct rocker *rocker,
+ const struct rocker_tlv **attrs)
{
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
@@ -1245,10 +1242,10 @@ static void rocker_dma_rx_ring_skb_unmap(struct rocker *rocker,
pci_unmap_single(pdev, dma_handle, len, PCI_DMA_FROMDEVICE);
}
-static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
- struct rocker_desc_info *desc_info)
+static void rocker_dma_rx_ring_skb_free(const struct rocker *rocker,
+ const struct rocker_desc_info *desc_info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
if (!skb)
@@ -1258,15 +1255,15 @@ static void rocker_dma_rx_ring_skb_free(struct rocker *rocker,
dev_kfree_skb_any(skb);
}
-static int rocker_dma_rx_ring_skbs_alloc(struct rocker *rocker,
- struct rocker_port *rocker_port)
+static int rocker_dma_rx_ring_skbs_alloc(const struct rocker_port *rocker_port)
{
- struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker *rocker = rocker_port->rocker;
int i;
int err;
for (i = 0; i < rx_ring->size; i++) {
- err = rocker_dma_rx_ring_skb_alloc(rocker, rocker_port,
+ err = rocker_dma_rx_ring_skb_alloc(rocker_port,
&rx_ring->desc_info[i]);
if (err)
goto rollback;
@@ -1279,10 +1276,10 @@ rollback:
return err;
}
-static void rocker_dma_rx_ring_skbs_free(struct rocker *rocker,
- struct rocker_port *rocker_port)
+static void rocker_dma_rx_ring_skbs_free(const struct rocker_port *rocker_port)
{
- struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker_dma_ring_info *rx_ring = &rocker_port->rx_ring;
+ const struct rocker *rocker = rocker_port->rocker;
int i;
for (i = 0; i < rx_ring->size; i++)
@@ -1328,7 +1325,7 @@ static int rocker_port_dma_rings_init(struct rocker_port *rocker_port)
goto err_dma_rx_ring_bufs_alloc;
}
- err = rocker_dma_rx_ring_skbs_alloc(rocker, rocker_port);
+ err = rocker_dma_rx_ring_skbs_alloc(rocker_port);
if (err) {
netdev_err(rocker_port->dev, "failed to alloc rx dma ring skbs\n");
goto err_dma_rx_ring_skbs_alloc;
@@ -1354,7 +1351,7 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
{
struct rocker *rocker = rocker_port->rocker;
- rocker_dma_rx_ring_skbs_free(rocker, rocker_port);
+ rocker_dma_rx_ring_skbs_free(rocker_port);
rocker_dma_ring_bufs_free(rocker, &rocker_port->rx_ring,
PCI_DMA_BIDIRECTIONAL);
rocker_dma_ring_destroy(rocker, &rocker_port->rx_ring);
@@ -1363,7 +1360,8 @@ static void rocker_port_dma_rings_fini(struct rocker_port *rocker_port)
rocker_dma_ring_destroy(rocker, &rocker_port->tx_ring);
}
-static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
+static void rocker_port_set_enable(const struct rocker_port *rocker_port,
+ bool enable)
{
u64 val = rocker_read64(rocker_port->rocker, PORT_PHYS_ENABLE);
@@ -1381,7 +1379,7 @@ static void rocker_port_set_enable(struct rocker_port *rocker_port, bool enable)
static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
{
struct rocker *rocker = dev_id;
- struct rocker_desc_info *desc_info;
+ const struct rocker_desc_info *desc_info;
struct rocker_wait *wait;
u32 credits = 0;
@@ -1397,22 +1395,22 @@ static irqreturn_t rocker_cmd_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void rocker_port_link_up(struct rocker_port *rocker_port)
+static void rocker_port_link_up(const struct rocker_port *rocker_port)
{
netif_carrier_on(rocker_port->dev);
netdev_info(rocker_port->dev, "Link is up\n");
}
-static void rocker_port_link_down(struct rocker_port *rocker_port)
+static void rocker_port_link_down(const struct rocker_port *rocker_port)
{
netif_carrier_off(rocker_port->dev);
netdev_info(rocker_port->dev, "Link is down\n");
}
-static int rocker_event_link_change(struct rocker *rocker,
+static int rocker_event_link_change(const struct rocker *rocker,
const struct rocker_tlv *info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_LINK_CHANGED_MAX + 1];
unsigned int port_number;
bool link_up;
struct rocker_port *rocker_port;
@@ -1458,7 +1456,7 @@ struct rocker_mac_vlan_seen_work {
static void rocker_event_mac_vlan_seen_work(struct work_struct *work)
{
- struct rocker_mac_vlan_seen_work *sw =
+ const struct rocker_mac_vlan_seen_work *sw =
container_of(work, struct rocker_mac_vlan_seen_work, work);
rtnl_lock();
@@ -1469,14 +1467,14 @@ static void rocker_event_mac_vlan_seen_work(struct work_struct *work)
kfree(work);
}
-static int rocker_event_mac_vlan_seen(struct rocker *rocker,
+static int rocker_event_mac_vlan_seen(const struct rocker *rocker,
const struct rocker_tlv *info)
{
struct rocker_mac_vlan_seen_work *sw;
- struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAC_VLAN_MAX + 1];
unsigned int port_number;
struct rocker_port *rocker_port;
- unsigned char *addr;
+ const unsigned char *addr;
int flags = ROCKER_OP_FLAG_LEARNED;
__be16 vlan_id;
@@ -1515,11 +1513,11 @@ static int rocker_event_mac_vlan_seen(struct rocker *rocker,
return 0;
}
-static int rocker_event_process(struct rocker *rocker,
- struct rocker_desc_info *desc_info)
+static int rocker_event_process(const struct rocker *rocker,
+ const struct rocker_desc_info *desc_info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
- struct rocker_tlv *info;
+ const struct rocker_tlv *attrs[ROCKER_TLV_EVENT_MAX + 1];
+ const struct rocker_tlv *info;
u16 type;
rocker_tlv_parse_desc(attrs, ROCKER_TLV_EVENT_MAX, desc_info);
@@ -1543,8 +1541,8 @@ static int rocker_event_process(struct rocker *rocker,
static irqreturn_t rocker_event_irq_handler(int irq, void *dev_id)
{
struct rocker *rocker = dev_id;
- struct pci_dev *pdev = rocker->pdev;
- struct rocker_desc_info *desc_info;
+ const struct pci_dev *pdev = rocker->pdev;
+ const struct rocker_desc_info *desc_info;
u32 credits = 0;
int err;
@@ -1588,17 +1586,20 @@ static irqreturn_t rocker_rx_irq_handler(int irq, void *dev_id)
* Command interface
********************/
-typedef int (*rocker_cmd_cb_t)(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
- void *priv);
+typedef int (*rocker_cmd_prep_cb_t)(const struct rocker_port *rocker_port,
+ struct rocker_desc_info *desc_info,
+ void *priv);
-static int rocker_cmd_exec(struct rocker *rocker,
- struct rocker_port *rocker_port,
+typedef int (*rocker_cmd_proc_cb_t)(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
+ void *priv);
+
+static int rocker_cmd_exec(struct rocker_port *rocker_port,
enum switchdev_trans trans,
- rocker_cmd_cb_t prepare, void *prepare_priv,
- rocker_cmd_cb_t process, void *process_priv)
+ rocker_cmd_prep_cb_t prepare, void *prepare_priv,
+ rocker_cmd_proc_cb_t process, void *process_priv)
{
+ struct rocker *rocker = rocker_port->rocker;
struct rocker_desc_info *desc_info;
struct rocker_wait *wait;
unsigned long flags;
@@ -1617,7 +1618,7 @@ static int rocker_cmd_exec(struct rocker *rocker,
goto out;
}
- err = prepare(rocker, rocker_port, desc_info, prepare_priv);
+ err = prepare(rocker_port, desc_info, prepare_priv);
if (err) {
spin_unlock_irqrestore(&rocker->cmd_ring_lock, flags);
goto out;
@@ -1639,17 +1640,16 @@ static int rocker_cmd_exec(struct rocker *rocker,
return err;
if (process)
- err = process(rocker, rocker_port, desc_info, process_priv);
+ err = process(rocker_port, desc_info, process_priv);
rocker_desc_gen_clear(desc_info);
out:
- rocker_wait_destroy(rocker_port, trans, wait);
+ rocker_wait_destroy(trans, wait);
return err;
}
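
The rocker_cmd_exec() rework above splits the single rocker_cmd_cb_t into a prepare callback that fills a mutable descriptor and a process callback that only inspects the completed one, and it drops the redundant rocker argument since the port already carries it. A standalone sketch of the two-phase shape (names are illustrative, not the driver's):

    #include <stdio.h>

    struct desc { int payload; };

    /* prepare may write the descriptor; process only reads it back */
    typedef int (*prep_cb_t)(struct desc *d, void *priv);
    typedef int (*proc_cb_t)(const struct desc *d, void *priv);

    static int cmd_exec(prep_cb_t prepare, void *prep_priv,
                        proc_cb_t process, void *proc_priv)
    {
        struct desc d = { 0 };
        int err = prepare(&d, prep_priv);

        if (err)
            return err;
        /* ... hardware would complete the descriptor here ... */
        return process ? process(&d, proc_priv) : 0;
    }

    static int fill(struct desc *d, void *priv)
    {
        d->payload = *(int *)priv;
        return 0;
    }

    static int show(const struct desc *d, void *priv)
    {
        (void)priv;
        printf("%d\n", d->payload);
        return 0;
    }

    int main(void)
    {
        int v = 42;

        return cmd_exec(fill, &v, show, NULL);
    }
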
static int
-rocker_cmd_get_port_settings_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_get_port_settings_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -1669,14 +1669,13 @@ rocker_cmd_get_port_settings_prep(struct rocker *rocker,
}
static int
-rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_ethtool_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
struct ethtool_cmd *ecmd = priv;
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
u32 speed;
u8 duplex;
u8 autoneg;
@@ -1708,15 +1707,14 @@ rocker_cmd_get_port_settings_ethtool_proc(struct rocker *rocker,
}
static int
-rocker_cmd_get_port_settings_macaddr_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_macaddr_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
unsigned char *macaddr = priv;
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
- struct rocker_tlv *attr;
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attr;
rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
if (!attrs[ROCKER_TLV_CMD_INFO])
@@ -1741,17 +1739,16 @@ struct port_name {
};
static int
-rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_settings_phys_name_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
- struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *info_attrs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
struct port_name *name = priv;
- struct rocker_tlv *attr;
+ const struct rocker_tlv *attr;
size_t i, j, len;
- char *str;
+ const char *str;
rocker_tlv_parse_desc(attrs, ROCKER_TLV_CMD_MAX, desc_info);
if (!attrs[ROCKER_TLV_CMD_INFO])
@@ -1783,8 +1780,7 @@ rocker_cmd_get_port_settings_phys_name_proc(struct rocker *rocker,
}
static int
-rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_set_port_settings_ethtool_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -1814,12 +1810,11 @@ rocker_cmd_set_port_settings_ethtool_prep(struct rocker *rocker,
}
static int
-rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_set_port_settings_macaddr_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
- unsigned char *macaddr = priv;
+ const unsigned char *macaddr = priv;
struct rocker_tlv *cmd_info;
if (rocker_tlv_put_u16(desc_info, ROCKER_TLV_CMD_TYPE,
@@ -1839,8 +1834,7 @@ rocker_cmd_set_port_settings_macaddr_prep(struct rocker *rocker,
}
static int
-rocker_cmd_set_port_learning_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_set_port_learning_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -1865,8 +1859,7 @@ rocker_cmd_set_port_learning_prep(struct rocker *rocker,
static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
- SWITCHDEV_TRANS_NONE,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_ethtool_proc,
ecmd);
@@ -1875,8 +1868,7 @@ static int rocker_cmd_get_port_settings_ethtool(struct rocker_port *rocker_port,
static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
- SWITCHDEV_TRANS_NONE,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_macaddr_proc,
macaddr);
@@ -1885,8 +1877,7 @@ static int rocker_cmd_get_port_settings_macaddr(struct rocker_port *rocker_port,
static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
struct ethtool_cmd *ecmd)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
- SWITCHDEV_TRANS_NONE,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_set_port_settings_ethtool_prep,
ecmd, NULL, NULL);
}
@@ -1894,8 +1885,7 @@ static int rocker_cmd_set_port_settings_ethtool(struct rocker_port *rocker_port,
static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
unsigned char *macaddr)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
- SWITCHDEV_TRANS_NONE,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_set_port_settings_macaddr_prep,
macaddr, NULL, NULL);
}
@@ -1903,13 +1893,14 @@ static int rocker_cmd_set_port_settings_macaddr(struct rocker_port *rocker_port,
static int rocker_port_set_learning(struct rocker_port *rocker_port,
enum switchdev_trans trans)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port, trans,
+ return rocker_cmd_exec(rocker_port, trans,
rocker_cmd_set_port_learning_prep,
NULL, NULL, NULL);
}
-static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.ig_port.in_pport))
@@ -1924,8 +1915,9 @@ static int rocker_cmd_flow_tbl_add_ig_port(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.vlan.in_pport))
@@ -1947,8 +1939,9 @@ static int rocker_cmd_flow_tbl_add_vlan(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.term_mac.in_pport))
@@ -1984,7 +1977,7 @@ static int rocker_cmd_flow_tbl_add_term_mac(struct rocker_desc_info *desc_info,
static int
rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_be16(desc_info, ROCKER_TLV_OF_DPA_ETHERTYPE,
entry->key.ucast_routing.eth_type))
@@ -2005,8 +1998,9 @@ rocker_cmd_flow_tbl_add_ucast_routing(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (entry->key.bridge.has_eth_dst &&
rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_DST_MAC,
@@ -2038,8 +2032,9 @@ static int rocker_cmd_flow_tbl_add_bridge(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
- struct rocker_flow_tbl_entry *entry)
+static int
+rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
+ const struct rocker_flow_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_IN_PPORT,
entry->key.acl.in_pport))
@@ -2104,12 +2099,11 @@ static int rocker_cmd_flow_tbl_add_acl(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_flow_tbl_add(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
- struct rocker_flow_tbl_entry *entry = priv;
+ const struct rocker_flow_tbl_entry *entry = priv;
struct rocker_tlv *cmd_info;
int err = 0;
@@ -2162,8 +2156,7 @@ static int rocker_cmd_flow_tbl_add(struct rocker *rocker,
return 0;
}
-static int rocker_cmd_flow_tbl_del(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_flow_tbl_del(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -2199,7 +2192,7 @@ rocker_cmd_group_tbl_add_l2_interface(struct rocker_desc_info *desc_info,
static int
rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
- struct rocker_group_tbl_entry *entry)
+ const struct rocker_group_tbl_entry *entry)
{
if (rocker_tlv_put_u32(desc_info, ROCKER_TLV_OF_DPA_GROUP_ID_LOWER,
entry->l2_rewrite.group_id))
@@ -2222,7 +2215,7 @@ rocker_cmd_group_tbl_add_l2_rewrite(struct rocker_desc_info *desc_info,
static int
rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
- struct rocker_group_tbl_entry *entry)
+ const struct rocker_group_tbl_entry *entry)
{
int i;
struct rocker_tlv *group_ids;
@@ -2248,7 +2241,7 @@ rocker_cmd_group_tbl_add_group_ids(struct rocker_desc_info *desc_info,
static int
rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
- struct rocker_group_tbl_entry *entry)
+ const struct rocker_group_tbl_entry *entry)
{
if (!is_zero_ether_addr(entry->l3_unicast.eth_src) &&
rocker_tlv_put(desc_info, ROCKER_TLV_OF_DPA_SRC_MAC,
@@ -2272,8 +2265,7 @@ rocker_cmd_group_tbl_add_l3_unicast(struct rocker_desc_info *desc_info,
return 0;
}
-static int rocker_cmd_group_tbl_add(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_group_tbl_add(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -2318,8 +2310,7 @@ static int rocker_cmd_group_tbl_add(struct rocker *rocker,
return 0;
}
-static int rocker_cmd_group_tbl_del(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_cmd_group_tbl_del(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -2402,7 +2393,8 @@ static void rocker_free_tbls(struct rocker *rocker)
}
static struct rocker_flow_tbl_entry *
-rocker_flow_tbl_find(struct rocker *rocker, struct rocker_flow_tbl_entry *match)
+rocker_flow_tbl_find(const struct rocker *rocker,
+ const struct rocker_flow_tbl_entry *match)
{
struct rocker_flow_tbl_entry *found;
size_t key_len = match->key_len ? match->key_len : sizeof(found->key);
@@ -2435,7 +2427,7 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
match->cookie = found->cookie;
if (trans != SWITCHDEV_TRANS_PREPARE)
hash_del(&found->entry);
- rocker_port_kfree(rocker_port, trans, found);
+ rocker_port_kfree(trans, found);
found = match;
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD;
} else {
@@ -2449,8 +2441,7 @@ static int rocker_flow_tbl_add(struct rocker_port *rocker_port,
spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
- return rocker_cmd_exec(rocker, rocker_port, trans,
- rocker_cmd_flow_tbl_add,
+ return rocker_cmd_exec(rocker_port, trans, rocker_cmd_flow_tbl_add,
found, NULL, NULL);
}
@@ -2478,13 +2469,13 @@ static int rocker_flow_tbl_del(struct rocker_port *rocker_port,
spin_unlock_irqrestore(&rocker->flow_tbl_lock, flags);
- rocker_port_kfree(rocker_port, trans, match);
+ rocker_port_kfree(trans, match);
if (found) {
- err = rocker_cmd_exec(rocker, rocker_port, trans,
+ err = rocker_cmd_exec(rocker_port, trans,
rocker_cmd_flow_tbl_del,
found, NULL, NULL);
- rocker_port_kfree(rocker_port, trans, found);
+ rocker_port_kfree(trans, found);
}
return err;
@@ -2715,8 +2706,8 @@ static int rocker_flow_tbl_acl(struct rocker_port *rocker_port,
}
static struct rocker_group_tbl_entry *
-rocker_group_tbl_find(struct rocker *rocker,
- struct rocker_group_tbl_entry *match)
+rocker_group_tbl_find(const struct rocker *rocker,
+ const struct rocker_group_tbl_entry *match)
{
struct rocker_group_tbl_entry *found;
@@ -2729,19 +2720,18 @@ rocker_group_tbl_find(struct rocker *rocker,
return NULL;
}
-static void rocker_group_tbl_entry_free(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+static void rocker_group_tbl_entry_free(enum switchdev_trans trans,
struct rocker_group_tbl_entry *entry)
{
switch (ROCKER_GROUP_TYPE_GET(entry->group_id)) {
case ROCKER_OF_DPA_GROUP_TYPE_L2_FLOOD:
case ROCKER_OF_DPA_GROUP_TYPE_L2_MCAST:
- rocker_port_kfree(rocker_port, trans, entry->group_ids);
+ rocker_port_kfree(trans, entry->group_ids);
break;
default:
break;
}
- rocker_port_kfree(rocker_port, trans, entry);
+ rocker_port_kfree(trans, entry);
}
static int rocker_group_tbl_add(struct rocker_port *rocker_port,
@@ -2759,7 +2749,7 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
if (found) {
if (trans != SWITCHDEV_TRANS_PREPARE)
hash_del(&found->entry);
- rocker_group_tbl_entry_free(rocker_port, trans, found);
+ rocker_group_tbl_entry_free(trans, found);
found = match;
found->cmd = ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD;
} else {
@@ -2772,8 +2762,7 @@ static int rocker_group_tbl_add(struct rocker_port *rocker_port,
spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
- return rocker_cmd_exec(rocker, rocker_port, trans,
- rocker_cmd_group_tbl_add,
+ return rocker_cmd_exec(rocker_port, trans, rocker_cmd_group_tbl_add,
found, NULL, NULL);
}
@@ -2798,13 +2787,13 @@ static int rocker_group_tbl_del(struct rocker_port *rocker_port,
spin_unlock_irqrestore(&rocker->group_tbl_lock, flags);
- rocker_group_tbl_entry_free(rocker_port, trans, match);
+ rocker_group_tbl_entry_free(trans, match);
if (found) {
- err = rocker_cmd_exec(rocker, rocker_port, trans,
+ err = rocker_cmd_exec(rocker_port, trans,
rocker_cmd_group_tbl_del,
found, NULL, NULL);
- rocker_group_tbl_entry_free(rocker_port, trans, found);
+ rocker_group_tbl_entry_free(trans, found);
}
return err;
@@ -2840,7 +2829,7 @@ static int rocker_group_l2_interface(struct rocker_port *rocker_port,
static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
enum switchdev_trans trans,
int flags, u8 group_count,
- u32 *group_ids, u32 group_id)
+ const u32 *group_ids, u32 group_id)
{
struct rocker_group_tbl_entry *entry;
@@ -2854,7 +2843,7 @@ static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
entry->group_ids = rocker_port_kcalloc(rocker_port, trans, group_count,
sizeof(u32));
if (!entry->group_ids) {
- rocker_port_kfree(rocker_port, trans, entry);
+ rocker_port_kfree(trans, entry);
return -ENOMEM;
}
memcpy(entry->group_ids, group_ids, group_count * sizeof(u32));
@@ -2865,7 +2854,7 @@ static int rocker_group_l2_fan_out(struct rocker_port *rocker_port,
static int rocker_group_l2_flood(struct rocker_port *rocker_port,
enum switchdev_trans trans, int flags,
__be16 vlan_id, u8 group_count,
- u32 *group_ids, u32 group_id)
+ const u32 *group_ids, u32 group_id)
{
return rocker_group_l2_fan_out(rocker_port, trans, flags,
group_count, group_ids,
@@ -2874,7 +2863,7 @@ static int rocker_group_l2_flood(struct rocker_port *rocker_port,
static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
enum switchdev_trans trans, int flags,
- u32 index, u8 *src_mac, u8 *dst_mac,
+ u32 index, const u8 *src_mac, const u8 *dst_mac,
__be16 vlan_id, bool ttl_check, u32 pport)
{
struct rocker_group_tbl_entry *entry;
@@ -2896,7 +2885,7 @@ static int rocker_group_l3_unicast(struct rocker_port *rocker_port,
}
static struct rocker_neigh_tbl_entry *
- rocker_neigh_tbl_find(struct rocker *rocker, __be32 ip_addr)
+rocker_neigh_tbl_find(const struct rocker *rocker, __be32 ip_addr)
{
struct rocker_neigh_tbl_entry *found;
@@ -2921,21 +2910,20 @@ static void _rocker_neigh_add(struct rocker *rocker,
be32_to_cpu(entry->ip_addr));
}
-static void _rocker_neigh_del(struct rocker_port *rocker_port,
- enum switchdev_trans trans,
+static void _rocker_neigh_del(enum switchdev_trans trans,
struct rocker_neigh_tbl_entry *entry)
{
if (trans == SWITCHDEV_TRANS_PREPARE)
return;
if (--entry->ref_count == 0) {
hash_del(&entry->entry);
- rocker_port_kfree(rocker_port, trans, entry);
+ rocker_port_kfree(trans, entry);
}
}
static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
enum switchdev_trans trans,
- u8 *eth_dst, bool ttl_check)
+ const u8 *eth_dst, bool ttl_check)
{
if (eth_dst) {
ether_addr_copy(entry->eth_dst, eth_dst);
@@ -2947,7 +2935,7 @@ static void _rocker_neigh_update(struct rocker_neigh_tbl_entry *entry,
static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
enum switchdev_trans trans,
- int flags, __be32 ip_addr, u8 *eth_dst)
+ int flags, __be32 ip_addr, const u8 *eth_dst)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_neigh_tbl_entry *entry;
@@ -2983,7 +2971,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
_rocker_neigh_add(rocker, trans, entry);
} else if (removing) {
memcpy(entry, found, sizeof(*entry));
- _rocker_neigh_del(rocker_port, trans, found);
+ _rocker_neigh_del(trans, found);
} else if (updating) {
_rocker_neigh_update(found, trans, eth_dst, true);
memcpy(entry, found, sizeof(*entry));
@@ -3032,7 +3020,7 @@ static int rocker_port_ipv4_neigh(struct rocker_port *rocker_port,
err_out:
if (!adding)
- rocker_port_kfree(rocker_port, trans, entry);
+ rocker_port_kfree(trans, entry);
return err;
}
@@ -3100,7 +3088,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
*index = entry->index;
resolved = false;
} else if (removing) {
- _rocker_neigh_del(rocker_port, trans, found);
+ _rocker_neigh_del(trans, found);
} else if (updating) {
_rocker_neigh_update(found, trans, NULL, false);
resolved = !is_zero_ether_addr(found->eth_dst);
@@ -3111,7 +3099,7 @@ static int rocker_port_ipv4_nh(struct rocker_port *rocker_port,
spin_unlock_irqrestore(&rocker->neigh_tbl_lock, lock_flags);
if (!adding)
- rocker_port_kfree(rocker_port, trans, entry);
+ rocker_port_kfree(trans, entry);
if (err)
return err;
@@ -3129,7 +3117,7 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
int flags, __be16 vlan_id)
{
struct rocker_port *p;
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
u32 group_id = ROCKER_GROUP_L2_FLOOD(vlan_id, 0);
u32 *group_ids;
u8 group_count = 0;
@@ -3148,6 +3136,8 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
for (i = 0; i < rocker->port_count; i++) {
p = rocker->ports[i];
+ if (!p)
+ continue;
if (!rocker_port_is_bridged(p))
continue;
if (test_bit(ntohs(vlan_id), p->vlan_bitmap)) {
@@ -3167,7 +3157,7 @@ static int rocker_port_vlan_flood_group(struct rocker_port *rocker_port,
"Error (%d) port VLAN l2 flood group\n", err);
no_ports_in_vlan:
- rocker_port_kfree(rocker_port, trans, group_ids);
+ rocker_port_kfree(trans, group_ids);
return err;
}
@@ -3175,7 +3165,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
enum switchdev_trans trans, int flags,
__be16 vlan_id, bool pop_vlan)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct rocker_port *p;
bool adding = !(flags & ROCKER_OP_FLAG_REMOVE);
u32 out_pport;
@@ -3207,7 +3197,7 @@ static int rocker_port_vlan_l2_groups(struct rocker_port *rocker_port,
for (i = 0; i < rocker->port_count; i++) {
p = rocker->ports[i];
- if (test_bit(ntohs(vlan_id), p->vlan_bitmap))
+ if (p && test_bit(ntohs(vlan_id), p->vlan_bitmap))
ref++;
}
@@ -3273,13 +3263,13 @@ static struct rocker_ctrl {
static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
enum switchdev_trans trans, int flags,
- struct rocker_ctrl *ctrl, __be16 vlan_id)
+ const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport = rocker_port->pport;
u32 in_pport_mask = 0xffffffff;
u32 out_pport = 0;
- u8 *eth_src = NULL;
- u8 *eth_src_mask = NULL;
+ const u8 *eth_src = NULL;
+ const u8 *eth_src_mask = NULL;
__be16 vlan_id_mask = htons(0xffff);
u8 ip_proto = 0;
u8 ip_proto_mask = 0;
@@ -3306,7 +3296,7 @@ static int rocker_port_ctrl_vlan_acl(struct rocker_port *rocker_port,
static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
enum switchdev_trans trans, int flags,
- struct rocker_ctrl *ctrl,
+ const struct rocker_ctrl *ctrl,
__be16 vlan_id)
{
enum rocker_of_dpa_table_id goto_tbl =
@@ -3331,7 +3321,7 @@ static int rocker_port_ctrl_vlan_bridge(struct rocker_port *rocker_port,
static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
enum switchdev_trans trans, int flags,
- struct rocker_ctrl *ctrl, __be16 vlan_id)
+ const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
u32 in_pport_mask = 0xffffffff;
__be16 vlan_id_mask = htons(0xffff);
@@ -3355,7 +3345,7 @@ static int rocker_port_ctrl_vlan_term(struct rocker_port *rocker_port,
static int rocker_port_ctrl_vlan(struct rocker_port *rocker_port,
enum switchdev_trans trans, int flags,
- struct rocker_ctrl *ctrl, __be16 vlan_id)
+ const struct rocker_ctrl *ctrl, __be16 vlan_id)
{
if (ctrl->acl)
return rocker_port_ctrl_vlan_acl(rocker_port, trans, flags,
@@ -3392,7 +3382,7 @@ static int rocker_port_ctrl_vlan_add(struct rocker_port *rocker_port,
static int rocker_port_ctrl(struct rocker_port *rocker_port,
enum switchdev_trans trans, int flags,
- struct rocker_ctrl *ctrl)
+ const struct rocker_ctrl *ctrl)
{
u16 vid;
int err = 0;
@@ -3510,7 +3500,7 @@ struct rocker_fdb_learn_work {
static void rocker_port_fdb_learn_work(struct work_struct *work)
{
- struct rocker_fdb_learn_work *lw =
+ const struct rocker_fdb_learn_work *lw =
container_of(work, struct rocker_fdb_learn_work, work);
bool removing = (lw->flags & ROCKER_OP_FLAG_REMOVE);
bool learned = (lw->flags & ROCKER_OP_FLAG_LEARNED);
@@ -3526,7 +3516,7 @@ static void rocker_port_fdb_learn_work(struct work_struct *work)
call_switchdev_notifiers(SWITCHDEV_FDB_ADD,
lw->rocker_port->dev, &info.info);
- rocker_port_kfree(lw->rocker_port, lw->trans, work);
+ rocker_port_kfree(lw->trans, work);
}
static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
@@ -3573,7 +3563,7 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
lw->vid = rocker_port_vlan_to_vid(rocker_port, vlan_id);
if (trans == SWITCHDEV_TRANS_PREPARE)
- rocker_port_kfree(rocker_port, trans, lw);
+ rocker_port_kfree(trans, lw);
else
schedule_work(&lw->work);
@@ -3581,7 +3571,8 @@ static int rocker_port_fdb_learn(struct rocker_port *rocker_port,
}
static struct rocker_fdb_tbl_entry *
-rocker_fdb_tbl_find(struct rocker *rocker, struct rocker_fdb_tbl_entry *match)
+rocker_fdb_tbl_find(const struct rocker *rocker,
+ const struct rocker_fdb_tbl_entry *match)
{
struct rocker_fdb_tbl_entry *found;
@@ -3618,7 +3609,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
found = rocker_fdb_tbl_find(rocker, fdb);
if (removing && found) {
- rocker_port_kfree(rocker_port, trans, fdb);
+ rocker_port_kfree(trans, fdb);
if (trans != SWITCHDEV_TRANS_PREPARE)
hash_del(&found->entry);
} else if (!removing && !found) {
@@ -3630,7 +3621,7 @@ static int rocker_port_fdb(struct rocker_port *rocker_port,
/* Check if adding and already exists, or removing and can't find */
if (!found != !removing) {
- rocker_port_kfree(rocker_port, trans, fdb);
+ rocker_port_kfree(trans, fdb);
if (!found && removing)
return 0;
/* Refreshing existing to update aging timers */
@@ -3839,7 +3830,7 @@ static int rocker_port_fwd_disable(struct rocker_port *rocker_port,
}
static struct rocker_internal_vlan_tbl_entry *
-rocker_internal_vlan_tbl_find(struct rocker *rocker, int ifindex)
+rocker_internal_vlan_tbl_find(const struct rocker *rocker, int ifindex)
{
struct rocker_internal_vlan_tbl_entry *found;
@@ -3894,8 +3885,9 @@ found:
return found->vlan_id;
}
-static void rocker_port_internal_vlan_id_put(struct rocker_port *rocker_port,
- int ifindex)
+static void
+rocker_port_internal_vlan_id_put(const struct rocker_port *rocker_port,
+ int ifindex)
{
struct rocker *rocker = rocker_port->rocker;
struct rocker_internal_vlan_tbl_entry *found;
@@ -3925,10 +3917,10 @@ not_found:
static int rocker_port_fib_ipv4(struct rocker_port *rocker_port,
enum switchdev_trans trans, __be32 dst,
- int dst_len, struct fib_info *fi, u32 tb_id,
- int flags)
+ int dst_len, const struct fib_info *fi,
+ u32 tb_id, int flags)
{
- struct fib_nh *nh;
+ const struct fib_nh *nh;
__be16 eth_type = htons(ETH_P_IP);
__be32 dst_mask = inet_make_mask(dst_len);
__be16 internal_vlan_id = rocker_port->internal_vlan_id;
@@ -4033,12 +4025,12 @@ static int rocker_port_stop(struct net_device *dev)
return 0;
}
-static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info)
+static void rocker_tx_desc_frags_unmap(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct pci_dev *pdev = rocker->pdev;
- struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_TX_MAX + 1];
struct rocker_tlv *attr;
int rem;
@@ -4046,7 +4038,7 @@ static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
if (!attrs[ROCKER_TLV_TX_FRAGS])
return;
rocker_tlv_for_each_nested(attr, attrs[ROCKER_TLV_TX_FRAGS], rem) {
- struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
+ const struct rocker_tlv *frag_attrs[ROCKER_TLV_TX_FRAG_ATTR_MAX + 1];
dma_addr_t dma_handle;
size_t len;
@@ -4063,11 +4055,11 @@ static void rocker_tx_desc_frags_unmap(struct rocker_port *rocker_port,
}
}
-static int rocker_tx_desc_frag_map_put(struct rocker_port *rocker_port,
+static int rocker_tx_desc_frag_map_put(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
char *buf, size_t buf_len)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct pci_dev *pdev = rocker->pdev;
dma_addr_t dma_handle;
struct rocker_tlv *frag;
@@ -4172,35 +4164,6 @@ static int rocker_port_set_mac_address(struct net_device *dev, void *p)
return 0;
}
-static int rocker_port_vlan_rx_add_vid(struct net_device *dev,
- __be16 proto, u16 vid)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int err;
-
- err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, 0, vid);
- if (err)
- return err;
-
- return rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
- 0, htons(vid));
-}
-
-static int rocker_port_vlan_rx_kill_vid(struct net_device *dev,
- __be16 proto, u16 vid)
-{
- struct rocker_port *rocker_port = netdev_priv(dev);
- int err;
-
- err = rocker_port_router_mac(rocker_port, SWITCHDEV_TRANS_NONE,
- ROCKER_OP_FLAG_REMOVE, htons(vid));
- if (err)
- return err;
-
- return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
- ROCKER_OP_FLAG_REMOVE, vid);
-}
-
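
The two NDOs deleted here duplicated work the switchdev object path already performs: a VLAN add now arrives via rocker_port_obj_add, flows through rocker_port_vlans_add into rocker_port_vlan_add, and the untagged VLAN (vid 0) is installed once at probe time, which is also why NETIF_F_HW_VLAN_CTAG_FILTER is dropped later in this patch. A rough sketch of that dispatch shape, in plain C with made-up ids rather than the kernel's switchdev types:

enum obj_id { OBJ_PORT_VLAN, OBJ_PORT_FDB, OBJ_IPV4_FIB };

struct obj {
	enum obj_id id;
	unsigned short vid;
};

/* Stub standing in for rocker_port_vlan_add(): would program the
 * VLAN table entry and the router MAC. */
static int port_vlan_add(unsigned short vid)
{
	(void)vid;
	return 0;
}

static int port_obj_add(const struct obj *obj)
{
	switch (obj->id) {
	case OBJ_PORT_VLAN:
		return port_vlan_add(obj->vid);
	default:
		return -1;	/* -EOPNOTSUPP in the kernel */
	}
}

int main(void)
{
	struct obj vlan = { OBJ_PORT_VLAN, 100 };

	return port_obj_add(&vlan);
}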
static int rocker_port_get_phys_port_name(struct net_device *dev,
char *buf, size_t len)
{
@@ -4208,8 +4171,7 @@ static int rocker_port_get_phys_port_name(struct net_device *dev,
struct port_name name = { .buf = buf, .len = len };
int err;
- err = rocker_cmd_exec(rocker_port->rocker, rocker_port,
- SWITCHDEV_TRANS_NONE,
+ err = rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_get_port_settings_prep, NULL,
rocker_cmd_get_port_settings_phys_name_proc,
&name);
@@ -4222,8 +4184,6 @@ static const struct net_device_ops rocker_port_netdev_ops = {
.ndo_stop = rocker_port_stop,
.ndo_start_xmit = rocker_port_xmit,
.ndo_set_mac_address = rocker_port_set_mac_address,
- .ndo_vlan_rx_add_vid = rocker_port_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = rocker_port_vlan_rx_kill_vid,
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
@@ -4240,8 +4200,8 @@ static const struct net_device_ops rocker_port_netdev_ops = {
static int rocker_port_attr_get(struct net_device *dev,
struct switchdev_attr *attr)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker_port *rocker_port = netdev_priv(dev);
+ const struct rocker *rocker = rocker_port->rocker;
switch (attr->id) {
case SWITCHDEV_ATTR_PORT_PARENT_ID:
@@ -4258,7 +4218,7 @@ static int rocker_port_attr_get(struct net_device *dev,
return 0;
}
-static void rocker_port_trans_abort(struct rocker_port *rocker_port)
+static void rocker_port_trans_abort(const struct rocker_port *rocker_port)
{
struct list_head *mem, *tmp;
@@ -4331,12 +4291,17 @@ static int rocker_port_vlan_add(struct rocker_port *rocker_port,
if (err)
return err;
- return rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
+ err = rocker_port_router_mac(rocker_port, trans, 0, htons(vid));
+ if (err)
+ rocker_port_vlan(rocker_port, trans,
+ ROCKER_OP_FLAG_REMOVE, vid);
+
+ return err;
}
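
The rewritten rocker_port_vlan_add gains a rollback: if installing the router MAC fails, the VLAN programmed in the first step is removed again so no half-applied state survives. The pattern, modeled in standalone C with stub functions (install_router_mac is forced to fail here to exercise the rollback):

static int install_vlan(int vid) { (void)vid; return 0; }
static void remove_vlan(int vid) { (void)vid; }
static int install_router_mac(int vid) { (void)vid; return -1; }

static int vlan_add(int vid)
{
	int err = install_vlan(vid);

	if (err)
		return err;
	err = install_router_mac(vid);
	if (err)
		remove_vlan(vid);	/* roll back step one */
	return err;
}

int main(void)
{
	return vlan_add(100) ? 0 : 1;	/* expect the rollback path */
}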
static int rocker_port_vlans_add(struct rocker_port *rocker_port,
enum switchdev_trans trans,
- struct switchdev_obj_vlan *vlan)
+ const struct switchdev_obj_vlan *vlan)
{
u16 vid;
int err;
@@ -4353,7 +4318,7 @@ static int rocker_port_vlans_add(struct rocker_port *rocker_port,
static int rocker_port_fdb_add(struct rocker_port *rocker_port,
enum switchdev_trans trans,
- struct switchdev_obj_fdb *fdb)
+ const struct switchdev_obj_fdb *fdb)
{
__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
int flags = 0;
@@ -4368,7 +4333,7 @@ static int rocker_port_obj_add(struct net_device *dev,
struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
- struct switchdev_obj_ipv4_fib *fib4;
+ const struct switchdev_obj_ipv4_fib *fib4;
int err = 0;
switch (obj->trans) {
@@ -4419,7 +4384,7 @@ static int rocker_port_vlan_del(struct rocker_port *rocker_port,
}
static int rocker_port_vlans_del(struct rocker_port *rocker_port,
- struct switchdev_obj_vlan *vlan)
+ const struct switchdev_obj_vlan *vlan)
{
u16 vid;
int err;
@@ -4435,7 +4400,7 @@ static int rocker_port_vlans_del(struct rocker_port *rocker_port,
static int rocker_port_fdb_del(struct rocker_port *rocker_port,
enum switchdev_trans trans,
- struct switchdev_obj_fdb *fdb)
+ const struct switchdev_obj_fdb *fdb)
{
__be16 vlan_id = rocker_port_vid_to_vlan(rocker_port, fdb->vid, NULL);
int flags = ROCKER_OP_FLAG_REMOVE;
@@ -4450,7 +4415,7 @@ static int rocker_port_obj_del(struct net_device *dev,
struct switchdev_obj *obj)
{
struct rocker_port *rocker_port = netdev_priv(dev);
- struct switchdev_obj_ipv4_fib *fib4;
+ const struct switchdev_obj_ipv4_fib *fib4;
int err = 0;
switch (obj->id) {
@@ -4475,7 +4440,7 @@ static int rocker_port_obj_del(struct net_device *dev,
return err;
}
-static int rocker_port_fdb_dump(struct rocker_port *rocker_port,
+static int rocker_port_fdb_dump(const struct rocker_port *rocker_port,
struct switchdev_obj *obj)
{
struct rocker *rocker = rocker_port->rocker;
@@ -4505,7 +4470,7 @@ static int rocker_port_fdb_dump(struct rocker_port *rocker_port,
static int rocker_port_obj_dump(struct net_device *dev,
struct switchdev_obj *obj)
{
- struct rocker_port *rocker_port = netdev_priv(dev);
+ const struct rocker_port *rocker_port = netdev_priv(dev);
int err = 0;
switch (obj->id) {
@@ -4589,8 +4554,7 @@ static void rocker_port_get_strings(struct net_device *netdev, u32 stringset,
}
static int
-rocker_cmd_get_port_stats_prep(struct rocker *rocker,
- struct rocker_port *rocker_port,
+rocker_cmd_get_port_stats_prep(const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info,
void *priv)
{
@@ -4614,14 +4578,13 @@ rocker_cmd_get_port_stats_prep(struct rocker *rocker,
}
static int
-rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
- struct rocker_desc_info *desc_info,
+rocker_cmd_get_port_stats_ethtool_proc(const struct rocker_port *rocker_port,
+ const struct rocker_desc_info *desc_info,
void *priv)
{
- struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
- struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
- struct rocker_tlv *pattr;
+ const struct rocker_tlv *attrs[ROCKER_TLV_CMD_MAX + 1];
+ const struct rocker_tlv *stats_attrs[ROCKER_TLV_CMD_PORT_STATS_MAX + 1];
+ const struct rocker_tlv *pattr;
u32 pport;
u64 *data = priv;
int i;
@@ -4655,8 +4618,7 @@ rocker_cmd_get_port_stats_ethtool_proc(struct rocker *rocker,
static int rocker_cmd_get_port_stats_ethtool(struct rocker_port *rocker_port,
void *priv)
{
- return rocker_cmd_exec(rocker_port->rocker, rocker_port,
- SWITCHDEV_TRANS_NONE,
+ return rocker_cmd_exec(rocker_port, SWITCHDEV_TRANS_NONE,
rocker_cmd_get_port_stats_prep, NULL,
rocker_cmd_get_port_stats_ethtool_proc,
priv);
@@ -4707,8 +4669,8 @@ static struct rocker_port *rocker_port_napi_tx_get(struct napi_struct *napi)
static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
{
struct rocker_port *rocker_port = rocker_port_napi_tx_get(napi);
- struct rocker *rocker = rocker_port->rocker;
- struct rocker_desc_info *desc_info;
+ const struct rocker *rocker = rocker_port->rocker;
+ const struct rocker_desc_info *desc_info;
u32 credits = 0;
int err;
@@ -4743,11 +4705,11 @@ static int rocker_port_poll_tx(struct napi_struct *napi, int budget)
return 0;
}
-static int rocker_port_rx_proc(struct rocker *rocker,
- struct rocker_port *rocker_port,
+static int rocker_port_rx_proc(const struct rocker *rocker,
+ const struct rocker_port *rocker_port,
struct rocker_desc_info *desc_info)
{
- struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
+ const struct rocker_tlv *attrs[ROCKER_TLV_RX_MAX + 1];
struct sk_buff *skb = rocker_desc_cookie_ptr_get(desc_info);
size_t rx_len;
@@ -4769,7 +4731,7 @@ static int rocker_port_rx_proc(struct rocker *rocker,
netif_receive_skb(skb);
- return rocker_dma_rx_ring_skb_alloc(rocker, rocker_port, desc_info);
+ return rocker_dma_rx_ring_skb_alloc(rocker_port, desc_info);
}
static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
@@ -4780,7 +4742,7 @@ static struct rocker_port *rocker_port_napi_rx_get(struct napi_struct *napi)
static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
{
struct rocker_port *rocker_port = rocker_port_napi_rx_get(napi);
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
struct rocker_desc_info *desc_info;
u32 credits = 0;
int err;
@@ -4820,9 +4782,9 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
* PCI driver ops
*****************/
-static void rocker_carrier_init(struct rocker_port *rocker_port)
+static void rocker_carrier_init(const struct rocker_port *rocker_port)
{
- struct rocker *rocker = rocker_port->rocker;
+ const struct rocker *rocker = rocker_port->rocker;
u64 link_status = rocker_read64(rocker, PORT_PHYS_LINK_STATUS);
bool link_up;
@@ -4833,7 +4795,7 @@ static void rocker_carrier_init(struct rocker_port *rocker_port)
netif_carrier_off(rocker_port->dev);
}
-static void rocker_remove_ports(struct rocker *rocker)
+static void rocker_remove_ports(const struct rocker *rocker)
{
struct rocker_port *rocker_port;
int i;
@@ -4847,10 +4809,10 @@ static void rocker_remove_ports(struct rocker *rocker)
kfree(rocker->ports);
}
-static void rocker_port_dev_addr_init(struct rocker *rocker,
- struct rocker_port *rocker_port)
+static void rocker_port_dev_addr_init(struct rocker_port *rocker_port)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct rocker *rocker = rocker_port->rocker;
+ const struct pci_dev *pdev = rocker->pdev;
int err;
err = rocker_cmd_get_port_settings_macaddr(rocker_port,
@@ -4863,9 +4825,10 @@ static void rocker_port_dev_addr_init(struct rocker *rocker,
static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
{
- struct pci_dev *pdev = rocker->pdev;
+ const struct pci_dev *pdev = rocker->pdev;
struct rocker_port *rocker_port;
struct net_device *dev;
+ u16 untagged_vid = 0;
int err;
dev = alloc_etherdev(sizeof(struct rocker_port));
@@ -4879,7 +4842,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_port->brport_flags = BR_LEARNING | BR_LEARNING_SYNC;
INIT_LIST_HEAD(&rocker_port->trans_mem);
- rocker_port_dev_addr_init(rocker, rocker_port);
+ rocker_port_dev_addr_init(rocker_port);
dev->netdev_ops = &rocker_port_netdev_ops;
dev->ethtool_ops = &rocker_port_ethtool_ops;
dev->switchdev_ops = &rocker_port_switchdev_ops;
@@ -4889,8 +4852,7 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
NAPI_POLL_WEIGHT);
rocker_carrier_init(rocker_port);
- dev->features |= NETIF_F_NETNS_LOCAL |
- NETIF_F_HW_VLAN_CTAG_FILTER;
+ dev->features |= NETIF_F_NETNS_LOCAL;
err = register_netdev(dev);
if (err) {
@@ -4901,16 +4863,27 @@ static int rocker_probe_port(struct rocker *rocker, unsigned int port_number)
rocker_port_set_learning(rocker_port, SWITCHDEV_TRANS_NONE);
- rocker_port->internal_vlan_id =
- rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
err = rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 0);
if (err) {
dev_err(&pdev->dev, "install ig port table failed\n");
goto err_port_ig_tbl;
}
+ rocker_port->internal_vlan_id =
+ rocker_port_internal_vlan_id_get(rocker_port, dev->ifindex);
+
+ err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+ untagged_vid, 0);
+ if (err) {
+ netdev_err(rocker_port->dev, "install untagged VLAN failed\n");
+ goto err_untagged_vlan;
+ }
+
return 0;
+err_untagged_vlan:
+ rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
+ ROCKER_OP_FLAG_REMOVE);
err_port_ig_tbl:
unregister_netdev(dev);
err_register_netdev:
@@ -4925,7 +4898,7 @@ static int rocker_probe_ports(struct rocker *rocker)
int err;
alloc_size = sizeof(struct rocker_port *) * rocker->port_count;
- rocker->ports = kmalloc(alloc_size, GFP_KERNEL);
+ rocker->ports = kzalloc(alloc_size, GFP_KERNEL);
if (!rocker->ports)
return -ENOMEM;
for (i = 0; i < rocker->port_count; i++) {
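
Switching the ports array from kmalloc to kzalloc is what makes the NULL checks added earlier in this patch (the "if (!p) continue;" guards when walking rocker->ports) meaningful: a slot now reads back as NULL until its port has been probed. A small standalone illustration, using calloc as the userspace stand-in for kzalloc:

#include <stdlib.h>

struct port {
	int id;
};

int main(void)
{
	int n = 4, i, live = 0;
	struct port probed = { .id = 1 };
	/* calloc ~ kzalloc: slots read back as NULL until probed */
	struct port **ports = calloc(n, sizeof(*ports));

	if (!ports)
		return 1;
	ports[1] = &probed;		/* only one port probed so far */
	for (i = 0; i < n; i++) {
		if (!ports[i])		/* the guard the patch adds */
			continue;
		live++;
	}
	free(ports);
	return live == 1 ? 0 : 1;
}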
@@ -4974,7 +4947,7 @@ err_enable_msix:
return err;
}
-static void rocker_msix_fini(struct rocker *rocker)
+static void rocker_msix_fini(const struct rocker *rocker)
{
pci_disable_msix(rocker->pdev);
kfree(rocker->msix_entries);
@@ -5140,7 +5113,7 @@ static struct pci_driver rocker_pci_driver = {
* Net device notifier event handler
************************************/
-static bool rocker_port_dev_check(struct net_device *dev)
+static bool rocker_port_dev_check(const struct net_device *dev)
{
return dev->netdev_ops == &rocker_port_netdev_ops;
}
@@ -5148,41 +5121,49 @@ static bool rocker_port_dev_check(struct net_device *dev)
static int rocker_port_bridge_join(struct rocker_port *rocker_port,
struct net_device *bridge)
{
+ u16 untagged_vid = 0;
int err;
- rocker_port_internal_vlan_id_put(rocker_port,
- rocker_port->dev->ifindex);
-
- rocker_port->bridge_dev = bridge;
+	/* The port is joining a bridge, so its internal VLAN is about
+	 * to change to the bridge's internal VLAN. Remove the untagged
+	 * VLAN (vid=0) from the port and re-add it once the internal
+	 * VLAN has changed.
+	 */
- /* Use bridge internal VLAN ID for untagged pkts */
- err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
- ROCKER_OP_FLAG_REMOVE, 0);
+ err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
if (err)
return err;
+
+ rocker_port_internal_vlan_id_put(rocker_port,
+ rocker_port->dev->ifindex);
rocker_port->internal_vlan_id =
rocker_port_internal_vlan_id_get(rocker_port, bridge->ifindex);
- return rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, 0, 0);
+
+ rocker_port->bridge_dev = bridge;
+
+ return rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+ untagged_vid, 0);
}
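
Ordering is the point of this rework: the untagged VLAN must be deleted while the old internal VLAN ID is still current, and re-added only after the ID has been switched to the bridge's; swapping first would tear down the wrong entry. The invariant in miniature, with illustrative names:

static int current_key = 1;	/* the port's internal VLAN ID */

static int del_untagged(int key) { (void)key; return 0; }
static int add_untagged(int key) { (void)key; return 0; }

static int rehome(int new_key)
{
	int err = del_untagged(current_key);	/* under the old key */

	if (err)
		return err;
	current_key = new_key;			/* swap the key */
	return add_untagged(current_key);	/* rebuild under new key */
}

int main(void)
{
	return rehome(2);
}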
static int rocker_port_bridge_leave(struct rocker_port *rocker_port)
{
+ u16 untagged_vid = 0;
int err;
- rocker_port_internal_vlan_id_put(rocker_port,
- rocker_port->bridge_dev->ifindex);
-
- rocker_port->bridge_dev = NULL;
-
- /* Use port internal VLAN ID for untagged pkts */
- err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE,
- ROCKER_OP_FLAG_REMOVE, 0);
+ err = rocker_port_vlan_del(rocker_port, untagged_vid, 0);
if (err)
return err;
+
+ rocker_port_internal_vlan_id_put(rocker_port,
+ rocker_port->bridge_dev->ifindex);
rocker_port->internal_vlan_id =
rocker_port_internal_vlan_id_get(rocker_port,
rocker_port->dev->ifindex);
- err = rocker_port_vlan(rocker_port, SWITCHDEV_TRANS_NONE, 0, 0);
+
+ rocker_port->bridge_dev = NULL;
+
+ err = rocker_port_vlan_add(rocker_port, SWITCHDEV_TRANS_NONE,
+ untagged_vid, 0);
if (err)
return err;
diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig
index 0889212..4dd92b7 100644
--- a/drivers/net/ethernet/sfc/Kconfig
+++ b/drivers/net/ethernet/sfc/Kconfig
@@ -36,3 +36,12 @@ config SFC_SRIOV
This enables support for the SFC9000 I/O Virtualization
features, allowing accelerated network performance in
virtualized environments.
+config SFC_MCDI_LOGGING
+ bool "Solarflare SFC9000/SFC9100-family MCDI logging support"
+ depends on SFC
+ default y
+ ---help---
+ This enables support for tracing of MCDI (Management-Controller-to-
+ Driver-Interface) commands and responses, allowing debugging of
+	  driver/firmware interaction. Tracing is enabled at runtime via
+	  the sysfs file 'mcdi_logging' under the PCI device.
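
Once SFC_MCDI_LOGGING is built in, tracing is toggled through the sysfs attribute the help text names. A hedged userspace example that turns logging on; the PCI address below is a placeholder to be replaced with the adapter's own:

#include <stdio.h>

int main(void)
{
	/* Placeholder PCI address: substitute your adapter's */
	const char *path =
		"/sys/bus/pci/devices/0000:01:00.0/mcdi_logging";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* write "0" to disable again */
	fclose(f);
	return 0;
}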
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index a547ceb..8476434 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -246,9 +246,38 @@ static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
return 0;
}
+static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%d\n",
+ ((efx->mcdi->fn_flags) &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+ ? 1 : 0);
+}
+
+static ssize_t efx_ef10_show_primary_flag(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+
+ return sprintf(buf, "%d\n",
+ ((efx->mcdi->fn_flags) &
+ (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
+ ? 1 : 0);
+}
+
+static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
+ NULL);
+static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
+
static int efx_ef10_probe(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data;
+ struct net_device *net_dev = efx->net_dev;
int i, rc;
/* We can have one VI for each 8K region. However, until we
@@ -267,6 +296,9 @@ static int efx_ef10_probe(struct efx_nic *efx)
return -ENOMEM;
efx->nic_data = nic_data;
+ /* we assume later that we can copy from this buffer in dwords */
+ BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
+
rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
if (rc)
@@ -311,29 +343,39 @@ static int efx_ef10_probe(struct efx_nic *efx)
if (rc)
goto fail3;
- rc = efx_ef10_get_pf_index(efx);
+ rc = device_create_file(&efx->pci_dev->dev,
+ &dev_attr_link_control_flag);
if (rc)
goto fail3;
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+ if (rc)
+ goto fail4;
+
+ rc = efx_ef10_get_pf_index(efx);
+ if (rc)
+ goto fail5;
+
rc = efx_ef10_init_datapath_caps(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->rx_packet_len_offset =
ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
rc = efx_mcdi_port_get_number(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->port_num = rc;
+ net_dev->dev_port = rc;
rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
if (rc)
- goto fail3;
+ goto fail5;
rc = efx_ef10_get_sysclk_freq(efx);
if (rc < 0)
- goto fail3;
+ goto fail5;
efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
/* Check whether firmware supports bug 35388 workaround.
@@ -341,9 +383,9 @@ static int efx_ef10_probe(struct efx_nic *efx)
* ask if it's already enabled
*/
rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true);
- if (rc == 0)
+ if (rc == 0) {
nic_data->workaround_35388 = true;
- else if (rc == -EPERM) {
+ } else if (rc == -EPERM) {
unsigned int enabled;
rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
@@ -351,21 +393,35 @@ static int efx_ef10_probe(struct efx_nic *efx)
goto fail3;
nic_data->workaround_35388 = enabled &
MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
+ } else if (rc != -ENOSYS && rc != -ENOENT) {
+ goto fail5;
}
- else if (rc != -ENOSYS && rc != -ENOENT)
- goto fail3;
netif_dbg(efx, probe, efx->net_dev,
"workaround for bug 35388 is %sabled\n",
nic_data->workaround_35388 ? "en" : "dis");
rc = efx_mcdi_mon_probe(efx);
if (rc && rc != -EPERM)
- goto fail3;
+ goto fail5;
efx_ptp_probe(efx, NULL);
+#ifdef CONFIG_SFC_SRIOV
+ if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
+ struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+
+ efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
+ } else
+#endif
+ ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
+
return 0;
+fail5:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+fail4:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
fail3:
efx_mcdi_fini(efx);
fail2:
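
The relabeling from fail3 to fail5 above follows the kernel's goto-unwind idiom: each new setup step gets a label, and a failure jumps past exactly the steps that succeeded, undoing them in reverse order. The idiom in miniature, as standalone C:

static int step_a(void) { return 0; }
static int step_b(void) { return 0; }
static int step_c(void) { return -1; }	/* simulate a late failure */
static void undo_a(void) { }
static void undo_b(void) { }

static int probe(void)
{
	int rc = step_a();

	if (rc)
		goto fail_a;
	rc = step_b();
	if (rc)
		goto fail_b;
	rc = step_c();
	if (rc)
		goto fail_c;
	return 0;

fail_c:
	undo_b();	/* cleanups run in reverse order of setup */
fail_b:
	undo_a();
fail_a:
	return rc;
}

int main(void)
{
	return probe() ? 0 : 1;	/* expect the unwind path */
}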
@@ -608,6 +664,9 @@ static void efx_ef10_remove(struct efx_nic *efx)
if (!nic_data->must_restore_piobufs)
efx_ef10_free_piobufs(efx);
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
+
efx_mcdi_fini(efx);
efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
kfree(nic_data);
@@ -622,6 +681,24 @@ static int efx_ef10_probe_pf(struct efx_nic *efx)
static int efx_ef10_probe_vf(struct efx_nic *efx)
{
int rc;
+ struct pci_dev *pci_dev_pf;
+
+	/* If the parent PF has no VF data structure, it doesn't know about
+	 * this VF, so fail the probe; the VF needs to be re-created. This
+	 * can happen if the PF driver is unloaded while the VF is assigned
+	 * to a guest.
+	 */
+ pci_dev_pf = efx->pci_dev->physfn;
+ if (pci_dev_pf) {
+ struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+ struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;
+
+ if (!nic_data_pf->vf) {
+ netif_info(efx, drv, efx->net_dev,
+ "The VF cannot link to its parent PF; "
+ "please destroy and re-create the VF\n");
+ return -EBUSY;
+ }
+ }
rc = efx_ef10_probe(efx);
if (rc)
@@ -639,6 +716,8 @@ static int efx_ef10_probe_vf(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data = efx->nic_data;
nic_data_p->vf[nic_data->vf_index].efx = efx;
+ nic_data_p->vf[nic_data->vf_index].pci_dev =
+ efx->pci_dev;
} else
netif_info(efx, drv, efx->net_dev,
"Could not get the PF id from VF\n");
@@ -932,93 +1011,112 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
[GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
- EF10_DMA_STAT(tx_bytes, TX_BYTES),
- EF10_DMA_STAT(tx_packets, TX_PKTS),
- EF10_DMA_STAT(tx_pause, TX_PAUSE_PKTS),
- EF10_DMA_STAT(tx_control, TX_CONTROL_PKTS),
- EF10_DMA_STAT(tx_unicast, TX_UNICAST_PKTS),
- EF10_DMA_STAT(tx_multicast, TX_MULTICAST_PKTS),
- EF10_DMA_STAT(tx_broadcast, TX_BROADCAST_PKTS),
- EF10_DMA_STAT(tx_lt64, TX_LT64_PKTS),
- EF10_DMA_STAT(tx_64, TX_64_PKTS),
- EF10_DMA_STAT(tx_65_to_127, TX_65_TO_127_PKTS),
- EF10_DMA_STAT(tx_128_to_255, TX_128_TO_255_PKTS),
- EF10_DMA_STAT(tx_256_to_511, TX_256_TO_511_PKTS),
- EF10_DMA_STAT(tx_512_to_1023, TX_512_TO_1023_PKTS),
- EF10_DMA_STAT(tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
- EF10_DMA_STAT(tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
- EF10_DMA_STAT(rx_bytes, RX_BYTES),
- EF10_DMA_INVIS_STAT(rx_bytes_minus_good_bytes, RX_BAD_BYTES),
- EF10_OTHER_STAT(rx_good_bytes),
- EF10_OTHER_STAT(rx_bad_bytes),
- EF10_DMA_STAT(rx_packets, RX_PKTS),
- EF10_DMA_STAT(rx_good, RX_GOOD_PKTS),
- EF10_DMA_STAT(rx_bad, RX_BAD_FCS_PKTS),
- EF10_DMA_STAT(rx_pause, RX_PAUSE_PKTS),
- EF10_DMA_STAT(rx_control, RX_CONTROL_PKTS),
- EF10_DMA_STAT(rx_unicast, RX_UNICAST_PKTS),
- EF10_DMA_STAT(rx_multicast, RX_MULTICAST_PKTS),
- EF10_DMA_STAT(rx_broadcast, RX_BROADCAST_PKTS),
- EF10_DMA_STAT(rx_lt64, RX_UNDERSIZE_PKTS),
- EF10_DMA_STAT(rx_64, RX_64_PKTS),
- EF10_DMA_STAT(rx_65_to_127, RX_65_TO_127_PKTS),
- EF10_DMA_STAT(rx_128_to_255, RX_128_TO_255_PKTS),
- EF10_DMA_STAT(rx_256_to_511, RX_256_TO_511_PKTS),
- EF10_DMA_STAT(rx_512_to_1023, RX_512_TO_1023_PKTS),
- EF10_DMA_STAT(rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
- EF10_DMA_STAT(rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
- EF10_DMA_STAT(rx_gtjumbo, RX_GTJUMBO_PKTS),
- EF10_DMA_STAT(rx_bad_gtjumbo, RX_JABBER_PKTS),
- EF10_DMA_STAT(rx_overflow, RX_OVERFLOW_PKTS),
- EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
- EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
- EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+ EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
+ EF10_DMA_STAT(port_tx_packets, TX_PKTS),
+ EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
+ EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
+ EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
+ EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
+ EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
+ EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
+ EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
+ EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
+ EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
+ EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
+ EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
+ EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
+ EF10_OTHER_STAT(port_rx_good_bytes),
+ EF10_OTHER_STAT(port_rx_bad_bytes),
+ EF10_DMA_STAT(port_rx_packets, RX_PKTS),
+ EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
+ EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
+ EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
+ EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
+ EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
+ EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
+ EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
+ EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
+ EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
+ EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
+ EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
+ EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
+ EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
+ EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
+ EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
+ EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
+ EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
+ EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
+ EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
+ EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
GENERIC_SW_STAT(rx_nodesc_trunc),
GENERIC_SW_STAT(rx_noskb_drops),
- EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
- EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
- EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
- EF10_DMA_STAT(rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
- EF10_DMA_STAT(rx_pm_trunc_qbb, PM_TRUNC_QBB),
- EF10_DMA_STAT(rx_pm_discard_qbb, PM_DISCARD_QBB),
- EF10_DMA_STAT(rx_pm_discard_mapping, PM_DISCARD_MAPPING),
- EF10_DMA_STAT(rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
- EF10_DMA_STAT(rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
- EF10_DMA_STAT(rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
- EF10_DMA_STAT(rx_dp_hlb_fetch, RXDP_EMERGENCY_FETCH_CONDITIONS),
- EF10_DMA_STAT(rx_dp_hlb_wait, RXDP_EMERGENCY_WAIT_CONDITIONS),
+ EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
+ EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
+ EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
+ EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
+ EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
+ EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
+ EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
+ EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
+ EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
+ EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
+ EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
+ EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
+ EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
+ EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
+ EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
+ EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
+ EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
+ EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
+ EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
+ EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
+ EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
+ EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
+ EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
+ EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
+ EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
+ EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
+ EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
+ EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
+ EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
+ EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
};
-#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_tx_bytes) | \
- (1ULL << EF10_STAT_tx_packets) | \
- (1ULL << EF10_STAT_tx_pause) | \
- (1ULL << EF10_STAT_tx_unicast) | \
- (1ULL << EF10_STAT_tx_multicast) | \
- (1ULL << EF10_STAT_tx_broadcast) | \
- (1ULL << EF10_STAT_rx_bytes) | \
- (1ULL << EF10_STAT_rx_bytes_minus_good_bytes) | \
- (1ULL << EF10_STAT_rx_good_bytes) | \
- (1ULL << EF10_STAT_rx_bad_bytes) | \
- (1ULL << EF10_STAT_rx_packets) | \
- (1ULL << EF10_STAT_rx_good) | \
- (1ULL << EF10_STAT_rx_bad) | \
- (1ULL << EF10_STAT_rx_pause) | \
- (1ULL << EF10_STAT_rx_control) | \
- (1ULL << EF10_STAT_rx_unicast) | \
- (1ULL << EF10_STAT_rx_multicast) | \
- (1ULL << EF10_STAT_rx_broadcast) | \
- (1ULL << EF10_STAT_rx_lt64) | \
- (1ULL << EF10_STAT_rx_64) | \
- (1ULL << EF10_STAT_rx_65_to_127) | \
- (1ULL << EF10_STAT_rx_128_to_255) | \
- (1ULL << EF10_STAT_rx_256_to_511) | \
- (1ULL << EF10_STAT_rx_512_to_1023) | \
- (1ULL << EF10_STAT_rx_1024_to_15xx) | \
- (1ULL << EF10_STAT_rx_15xx_to_jumbo) | \
- (1ULL << EF10_STAT_rx_gtjumbo) | \
- (1ULL << EF10_STAT_rx_bad_gtjumbo) | \
- (1ULL << EF10_STAT_rx_overflow) | \
- (1ULL << EF10_STAT_rx_nodesc_drops) | \
+#define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \
+ (1ULL << EF10_STAT_port_tx_packets) | \
+ (1ULL << EF10_STAT_port_tx_pause) | \
+ (1ULL << EF10_STAT_port_tx_unicast) | \
+ (1ULL << EF10_STAT_port_tx_multicast) | \
+ (1ULL << EF10_STAT_port_tx_broadcast) | \
+ (1ULL << EF10_STAT_port_rx_bytes) | \
+ (1ULL << \
+ EF10_STAT_port_rx_bytes_minus_good_bytes) | \
+ (1ULL << EF10_STAT_port_rx_good_bytes) | \
+ (1ULL << EF10_STAT_port_rx_bad_bytes) | \
+ (1ULL << EF10_STAT_port_rx_packets) | \
+ (1ULL << EF10_STAT_port_rx_good) | \
+ (1ULL << EF10_STAT_port_rx_bad) | \
+ (1ULL << EF10_STAT_port_rx_pause) | \
+ (1ULL << EF10_STAT_port_rx_control) | \
+ (1ULL << EF10_STAT_port_rx_unicast) | \
+ (1ULL << EF10_STAT_port_rx_multicast) | \
+ (1ULL << EF10_STAT_port_rx_broadcast) | \
+ (1ULL << EF10_STAT_port_rx_lt64) | \
+ (1ULL << EF10_STAT_port_rx_64) | \
+ (1ULL << EF10_STAT_port_rx_65_to_127) | \
+ (1ULL << EF10_STAT_port_rx_128_to_255) | \
+ (1ULL << EF10_STAT_port_rx_256_to_511) | \
+ (1ULL << EF10_STAT_port_rx_512_to_1023) |\
+ (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
+ (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
+ (1ULL << EF10_STAT_port_rx_gtjumbo) | \
+ (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
+ (1ULL << EF10_STAT_port_rx_overflow) | \
+ (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
(1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
(1ULL << GENERIC_STAT_rx_noskb_drops))
@@ -1026,39 +1124,39 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
* switchable port we do not expose these because they might not
* include all the packets they should.
*/
-#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_tx_control) | \
- (1ULL << EF10_STAT_tx_lt64) | \
- (1ULL << EF10_STAT_tx_64) | \
- (1ULL << EF10_STAT_tx_65_to_127) | \
- (1ULL << EF10_STAT_tx_128_to_255) | \
- (1ULL << EF10_STAT_tx_256_to_511) | \
- (1ULL << EF10_STAT_tx_512_to_1023) | \
- (1ULL << EF10_STAT_tx_1024_to_15xx) | \
- (1ULL << EF10_STAT_tx_15xx_to_jumbo))
+#define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
+ (1ULL << EF10_STAT_port_tx_lt64) | \
+ (1ULL << EF10_STAT_port_tx_64) | \
+ (1ULL << EF10_STAT_port_tx_65_to_127) |\
+ (1ULL << EF10_STAT_port_tx_128_to_255) |\
+ (1ULL << EF10_STAT_port_tx_256_to_511) |\
+ (1ULL << EF10_STAT_port_tx_512_to_1023) |\
+ (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
+ (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
/* These statistics are only provided by the 40G MAC. For a 10G/40G
* switchable port we do expose these because the errors will otherwise
* be silent.
*/
-#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_rx_align_error) | \
- (1ULL << EF10_STAT_rx_length_error))
+#define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
+ (1ULL << EF10_STAT_port_rx_length_error))
/* These statistics are only provided if the firmware supports the
* capability PM_AND_RXDP_COUNTERS.
*/
#define HUNT_PM_AND_RXDP_STAT_MASK ( \
- (1ULL << EF10_STAT_rx_pm_trunc_bb_overflow) | \
- (1ULL << EF10_STAT_rx_pm_discard_bb_overflow) | \
- (1ULL << EF10_STAT_rx_pm_trunc_vfifo_full) | \
- (1ULL << EF10_STAT_rx_pm_discard_vfifo_full) | \
- (1ULL << EF10_STAT_rx_pm_trunc_qbb) | \
- (1ULL << EF10_STAT_rx_pm_discard_qbb) | \
- (1ULL << EF10_STAT_rx_pm_discard_mapping) | \
- (1ULL << EF10_STAT_rx_dp_q_disabled_packets) | \
- (1ULL << EF10_STAT_rx_dp_di_dropped_packets) | \
- (1ULL << EF10_STAT_rx_dp_streaming_packets) | \
- (1ULL << EF10_STAT_rx_dp_hlb_fetch) | \
- (1ULL << EF10_STAT_rx_dp_hlb_wait))
+ (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \
+ (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \
+ (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \
+ (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \
+ (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \
+ (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \
+ (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
{
@@ -1066,6 +1164,10 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
u32 port_caps = efx_mcdi_phy_get_caps(efx);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ if (!(efx->mcdi->fn_flags &
+ 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
+ return 0;
+
if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
else
@@ -1080,13 +1182,28 @@ static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
{
- u64 raw_mask = efx_ef10_raw_stat_mask(efx);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 raw_mask[2];
+
+ raw_mask[0] = efx_ef10_raw_stat_mask(efx);
+
+ /* Only show vadaptor stats when EVB capability is present */
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
+ raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
+ raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
+ } else {
+ raw_mask[1] = 0;
+ }
#if BITS_PER_LONG == 64
- mask[0] = raw_mask;
+ mask[0] = raw_mask[0];
+ mask[1] = raw_mask[1];
#else
- mask[0] = raw_mask & 0xffffffff;
- mask[1] = raw_mask >> 32;
+ mask[0] = raw_mask[0] & 0xffffffff;
+ mask[1] = raw_mask[0] >> 32;
+ mask[2] = raw_mask[1] & 0xffffffff;
+ mask[3] = raw_mask[1] >> 32;
#endif
}
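
With the vadaptor counters added, EF10_STAT_COUNT exceeds 64, so the raw mask grows to two u64 words, and on 32-bit builds each word is further split into two unsigned longs for the bitmap. A standalone sketch of that packing, mirroring the #if above rather than any kernel helper:

#include <stdint.h>
#include <limits.h>

/* Pack two 64-bit mask words into an unsigned-long bitmap, the way
 * the conditional above does for 64- and 32-bit builds. */
static void pack_mask(const uint64_t raw[2], unsigned long *mask)
{
#if ULONG_MAX == 0xffffffffffffffffUL
	mask[0] = raw[0];		/* 64-bit: one word per long */
	mask[1] = raw[1];
#else
	mask[0] = raw[0] & 0xffffffff;	/* 32-bit: split each word */
	mask[1] = raw[0] >> 32;
	mask[2] = raw[1] & 0xffffffff;
	mask[3] = raw[1] >> 32;
#endif
}

int main(void)
{
	uint64_t raw[2] = { 0xf0f0f0f0f0f0f0f0ULL, 0x1ULL };
	unsigned long mask[4] = { 0 };

	pack_mask(raw, mask);
	return mask[0] ? 0 : 1;
}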
@@ -1099,7 +1216,51 @@ static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
mask, names);
}
-static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
+static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ u64 *stats = nic_data->stats;
+ size_t stats_count = 0, index;
+
+ efx_ef10_get_stat_mask(efx, mask);
+
+ if (full_stats) {
+ for_each_set_bit(index, mask, EF10_STAT_COUNT) {
+ if (efx_ef10_stat_desc[index].name) {
+ *full_stats++ = stats[index];
+ ++stats_count;
+ }
+ }
+ }
+
+ if (core_stats) {
+ core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
+ stats[EF10_STAT_rx_multicast] +
+ stats[EF10_STAT_rx_broadcast];
+ core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
+ stats[EF10_STAT_tx_multicast] +
+ stats[EF10_STAT_tx_broadcast];
+ core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
+ stats[EF10_STAT_rx_multicast_bytes] +
+ stats[EF10_STAT_rx_broadcast_bytes];
+ core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
+ stats[EF10_STAT_tx_multicast_bytes] +
+ stats[EF10_STAT_tx_broadcast_bytes];
+ core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
+ stats[GENERIC_STAT_rx_noskb_drops];
+ core_stats->multicast = stats[EF10_STAT_rx_multicast];
+ core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
+ core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
+ core_stats->rx_errors = core_stats->rx_crc_errors;
+ core_stats->tx_errors = stats[EF10_STAT_tx_bad];
+ }
+
+ return stats_count;
+}
+
+static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
DECLARE_BITMAP(mask, EF10_STAT_COUNT);
@@ -1124,67 +1285,114 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
return -EAGAIN;
/* Update derived statistics */
- efx_nic_fix_nodesc_drop_stat(efx, &stats[EF10_STAT_rx_nodesc_drops]);
- stats[EF10_STAT_rx_good_bytes] =
- stats[EF10_STAT_rx_bytes] -
- stats[EF10_STAT_rx_bytes_minus_good_bytes];
- efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
- stats[EF10_STAT_rx_bytes_minus_good_bytes]);
+ efx_nic_fix_nodesc_drop_stat(efx,
+ &stats[EF10_STAT_port_rx_nodesc_drops]);
+ stats[EF10_STAT_port_rx_good_bytes] =
+ stats[EF10_STAT_port_rx_bytes] -
+ stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
+ efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
+ stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
efx_update_sw_stats(efx, stats);
return 0;
}
-static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
- struct rtnl_link_stats64 *core_stats)
+static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
{
- DECLARE_BITMAP(mask, EF10_STAT_COUNT);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- u64 *stats = nic_data->stats;
- size_t stats_count = 0, index;
int retry;
- efx_ef10_get_stat_mask(efx, mask);
-
/* If we're unlucky enough to read statistics during the DMA, wait
* up to 10ms for it to finish (typically takes <500us)
*/
for (retry = 0; retry < 100; ++retry) {
- if (efx_ef10_try_update_nic_stats(efx) == 0)
+ if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
break;
udelay(100);
}
- if (full_stats) {
- for_each_set_bit(index, mask, EF10_STAT_COUNT) {
- if (efx_ef10_stat_desc[index].name) {
- *full_stats++ = stats[index];
- ++stats_count;
- }
- }
+ return efx_ef10_update_stats_common(efx, full_stats, core_stats);
+}
+
+static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
+{
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ DECLARE_BITMAP(mask, EF10_STAT_COUNT);
+ __le64 generation_start, generation_end;
+ u64 *stats = nic_data->stats;
+ u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
+ struct efx_buffer stats_buf;
+ __le64 *dma_stats;
+ int rc;
+
+ spin_unlock_bh(&efx->stats_lock);
+
+ if (in_interrupt()) {
+ /* If in atomic context, cannot update stats. Just update the
+ * software stats and return so the caller can continue.
+ */
+ spin_lock_bh(&efx->stats_lock);
+ efx_update_sw_stats(efx, stats);
+ return 0;
}
- if (core_stats) {
- core_stats->rx_packets = stats[EF10_STAT_rx_packets];
- core_stats->tx_packets = stats[EF10_STAT_tx_packets];
- core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
- core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
- core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] +
- stats[GENERIC_STAT_rx_nodesc_trunc] +
- stats[GENERIC_STAT_rx_noskb_drops];
- core_stats->multicast = stats[EF10_STAT_rx_multicast];
- core_stats->rx_length_errors =
- stats[EF10_STAT_rx_gtjumbo] +
- stats[EF10_STAT_rx_length_error];
- core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
- core_stats->rx_frame_errors = stats[EF10_STAT_rx_align_error];
- core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
- core_stats->rx_errors = (core_stats->rx_length_errors +
- core_stats->rx_crc_errors +
- core_stats->rx_frame_errors);
+ efx_ef10_get_stat_mask(efx, mask);
+
+ rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
+ if (rc) {
+ spin_lock_bh(&efx->stats_lock);
+ return rc;
}
- return stats_count;
+ dma_stats = stats_buf.addr;
+ dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
+
+ MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
+ MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
+ MAC_STATS_IN_DMA, 1);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ spin_lock_bh(&efx->stats_lock);
+ if (rc) {
+ /* Expect ENOENT if DMA queues have not been set up */
+ if (rc != -ENOENT || atomic_read(&efx->active_queues))
+ efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
+ sizeof(inbuf), NULL, 0, rc);
+ goto out;
+ }
+
+ generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
+ if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
+ WARN_ON_ONCE(1);
+ goto out;
+ }
+ rmb();
+ efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
+ stats, stats_buf.addr, false);
+ rmb();
+ generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
+ if (generation_end != generation_start) {
+ rc = -EAGAIN;
+ goto out;
+ }
+
+ efx_update_sw_stats(efx, stats);
+out:
+ efx_nic_free_buffer(efx, &stats_buf);
+ return rc;
+}
+
+static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
+ struct rtnl_link_stats64 *core_stats)
+{
+ if (efx_ef10_try_update_nic_stats_vf(efx))
+ return 0;
+
+ return efx_ef10_update_stats_common(efx, full_stats, core_stats);
}
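
The VF cannot read MAC statistics from registers, so it asks firmware to DMA them into a buffer and validates the copy with the generation markers: sample generation_end, copy the stats, then compare against generation_start; a mismatch means the DMA raced the read and the caller retries with -EAGAIN. A seqcount-style model of that protocol in standalone C11 (layout simplified; fences stand in for the kernel's rmb()):

#include <stdatomic.h>
#include <stdint.h>

struct dma_block {
	uint64_t generation_end;
	uint64_t stats[4];
	uint64_t generation_start;
};

static int read_stats(const volatile struct dma_block *b, uint64_t out[4])
{
	uint64_t end = b->generation_end;
	int i;

	atomic_thread_fence(memory_order_acquire);	/* ~rmb() */
	for (i = 0; i < 4; i++)
		out[i] = b->stats[i];
	atomic_thread_fence(memory_order_acquire);	/* ~rmb() */
	if (b->generation_start != end)
		return -1;	/* torn read: caller retries (-EAGAIN) */
	return 0;
}

int main(void)
{
	struct dma_block b = { .generation_end = 7, .generation_start = 7 };
	uint64_t out[4];

	return read_stats(&b, out);
}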
static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
@@ -1313,7 +1521,7 @@ static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
/* MAC statistics have been cleared on the NIC; clear the local
* statistic that we update with efx_update_diff_stat().
*/
- nic_data->stats[EF10_STAT_rx_bad_bytes] = 0;
+ nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
return -EIO;
}
@@ -4029,7 +4237,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.prepare_flr = efx_ef10_prepare_flr,
.finish_flr = efx_port_dummy_op_void,
.describe_stats = efx_ef10_describe_stats,
- .update_stats = efx_ef10_update_stats,
+ .update_stats = efx_ef10_update_stats_vf,
.start_stats = efx_port_dummy_op_void,
.pull_stats = efx_port_dummy_op_void,
.stop_stats = efx_port_dummy_op_void,
@@ -4091,6 +4299,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.vswitching_probe = efx_ef10_vswitching_probe_vf,
.vswitching_restore = efx_ef10_vswitching_restore_vf,
.vswitching_remove = efx_ef10_vswitching_remove_vf,
+ .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
#endif
.get_mac_address = efx_ef10_get_mac_address_vf,
.set_mac_address = efx_ef10_set_mac_address,
@@ -4130,7 +4339,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.prepare_flr = efx_ef10_prepare_flr,
.finish_flr = efx_port_dummy_op_void,
.describe_stats = efx_ef10_describe_stats,
- .update_stats = efx_ef10_update_stats,
+ .update_stats = efx_ef10_update_stats_pf,
.start_stats = efx_mcdi_mac_start_stats,
.pull_stats = efx_mcdi_mac_pull_stats,
.stop_stats = efx_mcdi_mac_stop_stats,
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index 3969b1b..6c9b6e4 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -165,6 +165,11 @@ static void efx_ef10_sriov_free_vf_vports(struct efx_nic *efx)
for (i = 0; i < efx->vf_count; i++) {
struct ef10_vf *vf = nic_data->vf + i;
+ /* If VF is assigned, do not free the vport */
+ if (vf->pci_dev &&
+ vf->pci_dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
+ continue;
+
if (vf->vport_assigned) {
efx_ef10_evb_port_assign(efx, EVB_PORT_ID_NULL, i);
vf->vport_assigned = 0;
@@ -380,7 +385,9 @@ void efx_ef10_vswitching_remove_pf(struct efx_nic *efx)
efx_ef10_vport_free(efx, nic_data->vport_id);
nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
- efx_ef10_vswitch_free(efx, nic_data->vport_id);
+ /* Only free the vswitch if no VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ efx_ef10_vswitch_free(efx, nic_data->vport_id);
}
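
Several teardown paths in this patch now consult pci_vfs_assigned() before destroying state a guest-assigned VF may still be using; the vswitch above, for instance, is freed only once no VF depends on it. The guard in miniature, with a flag standing in for the PCI query:

#include <stdbool.h>

static bool vfs_assigned;	/* stands in for pci_vfs_assigned() */

static void free_vswitch(void)
{
}

static void remove_pf(void)
{
	/* Only tear down shared state when no VF still depends on it */
	if (!vfs_assigned)
		free_vswitch();
}

int main(void)
{
	vfs_assigned = true;
	remove_pf();	/* vswitch intentionally left in place */
	return 0;
}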
void efx_ef10_vswitching_remove_vf(struct efx_nic *efx)
@@ -413,11 +420,22 @@ fail1:
return rc;
}
-static int efx_ef10_pci_sriov_disable(struct efx_nic *efx)
+static int efx_ef10_pci_sriov_disable(struct efx_nic *efx, bool force)
{
struct pci_dev *dev = efx->pci_dev;
+ unsigned int vfs_assigned = 0;
+
+ vfs_assigned = pci_vfs_assigned(dev);
+
+ if (vfs_assigned && !force) {
+ netif_info(efx, drv, efx->net_dev, "VFs are assigned to guests; "
+ "please detach them before disabling SR-IOV\n");
+ return -EBUSY;
+ }
+
+ if (!vfs_assigned)
+ pci_disable_sriov(dev);
- pci_disable_sriov(dev);
efx_ef10_sriov_free_vf_vswitching(efx);
efx->vf_count = 0;
return 0;
@@ -426,7 +444,7 @@ static int efx_ef10_pci_sriov_disable(struct efx_nic *efx)
int efx_ef10_sriov_configure(struct efx_nic *efx, int num_vfs)
{
if (num_vfs == 0)
- return efx_ef10_pci_sriov_disable(efx);
+ return efx_ef10_pci_sriov_disable(efx, false);
else
return efx_ef10_pci_sriov_enable(efx, num_vfs);
}
@@ -439,12 +457,25 @@ int efx_ef10_sriov_init(struct efx_nic *efx)
void efx_ef10_sriov_fini(struct efx_nic *efx)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int i;
int rc;
- if (!nic_data->vf)
+ if (!nic_data->vf) {
+		/* Remove any unassigned, orphaned VFs */
+ if (pci_num_vf(efx->pci_dev) && !pci_vfs_assigned(efx->pci_dev))
+ pci_disable_sriov(efx->pci_dev);
return;
+ }
+
+ /* Remove any VFs in the host */
+ for (i = 0; i < efx->vf_count; ++i) {
+ struct efx_nic *vf_efx = nic_data->vf[i].efx;
+
+ if (vf_efx)
+ vf_efx->pci_dev->driver->remove(vf_efx->pci_dev);
+ }
- rc = efx_ef10_pci_sriov_disable(efx);
+ rc = efx_ef10_pci_sriov_disable(efx, true);
if (rc)
netif_dbg(efx, drv, efx->net_dev,
"Disabling SRIOV was not successful rc=%d\n", rc);
@@ -736,3 +767,17 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
return 0;
}
+
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+
+ if (!is_valid_ether_addr(nic_data->port_id))
+ return -EOPNOTSUPP;
+
+ ppid->id_len = ETH_ALEN;
+ memcpy(ppid->id, nic_data->port_id, ppid->id_len);
+
+ return 0;
+}
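
Exposing ndo_get_phys_port_id lets tools recognize the PF and its VFs as functions of one physical port, identified here by the PF's MAC stored in port_id. A standalone sketch of filling such an ID; note the real code uses is_valid_ether_addr(), which also rejects multicast addresses, while this stub only rejects an all-zero one:

#include <string.h>

#define ETH_ALEN		6
#define MAX_PHYS_ITEM_ID_LEN	32

struct phys_item_id {
	unsigned char id[MAX_PHYS_ITEM_ID_LEN];
	unsigned char id_len;
};

/* Fill a physical-port identifier from a stored MAC, rejecting an
 * address that was never initialised. */
static int get_phys_port_id(const unsigned char mac[ETH_ALEN],
			    struct phys_item_id *ppid)
{
	static const unsigned char zero[ETH_ALEN];

	if (!memcmp(mac, zero, ETH_ALEN))
		return -1;	/* -EOPNOTSUPP in the kernel */
	ppid->id_len = ETH_ALEN;
	memcpy(ppid->id, mac, ETH_ALEN);
	return 0;
}

int main(void)
{
	const unsigned char mac[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 0x01 };
	struct phys_item_id ppid;

	return get_phys_port_id(mac, &ppid);
}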
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.h b/drivers/net/ethernet/sfc/ef10_sriov.h
index b985576..db4ef53 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.h
+++ b/drivers/net/ethernet/sfc/ef10_sriov.h
@@ -15,6 +15,7 @@
/**
* struct ef10_vf - PF's store of VF data
* @efx: efx_nic struct for the current VF
+ * @pci_dev: the pci_dev struct for the VF, retained while the VF is assigned
* @vport_id: vport ID for the VF
* @vport_assigned: record whether the vport is currently assigned to the VF
* @mac: MAC address for the VF, zero when address is removed from the vport
@@ -22,6 +23,7 @@
*/
struct ef10_vf {
struct efx_nic *efx;
+ struct pci_dev *pci_dev;
unsigned int vport_id;
unsigned int vport_assigned;
u8 mac[ETH_ALEN];
@@ -54,6 +56,9 @@ int efx_ef10_sriov_get_vf_config(struct efx_nic *efx, int vf_i,
int efx_ef10_sriov_set_vf_link_state(struct efx_nic *efx, int vf_i,
int link_state);
+int efx_ef10_sriov_get_phys_port_id(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid);
+
int efx_ef10_vswitching_probe_pf(struct efx_nic *efx);
int efx_ef10_vswitching_probe_vf(struct efx_nic *efx);
int efx_ef10_vswitching_restore_pf(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 9eafa39..0c42ed9 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1298,7 +1298,9 @@ static void efx_fini_io(struct efx_nic *efx)
efx->membase_phys = 0;
}
- pci_disable_device(efx->pci_dev);
+ /* Don't disable bus-mastering if VFs are assigned */
+ if (!pci_vfs_assigned(efx->pci_dev))
+ pci_disable_device(efx->pci_dev);
}
void efx_set_default_rx_indir_table(struct efx_nic *efx)
@@ -2282,6 +2284,7 @@ static const struct net_device_ops efx_netdev_ops = {
.ndo_set_vf_spoofchk = efx_sriov_set_vf_spoofchk,
.ndo_get_vf_config = efx_sriov_get_vf_config,
.ndo_set_vf_link_state = efx_sriov_set_vf_link_state,
+ .ndo_get_phys_port_id = efx_sriov_get_phys_port_id,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = efx_netpoll,
@@ -2326,6 +2329,28 @@ show_phy_type(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR(phy_type, 0444, show_phy_type, NULL);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static ssize_t show_mcdi_log(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", mcdi->logging_enabled);
+}
+static ssize_t set_mcdi_log(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+ bool enable = count > 0 && *buf != '0';
+
+ mcdi->logging_enabled = enable;
+ return count;
+}
+static DEVICE_ATTR(mcdi_logging, 0644, show_mcdi_log, set_mcdi_log);
+#endif
+
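Once the attribute pair above is registered (see the device_create_file() call added below), MCDI tracing can be toggled from user space like any other sysfs file. A hypothetical userspace toggle; the PCI address in the path is illustrative and must be replaced with the adapter's own:

#include <stdio.h>

int main(void)
{
        /* Path is illustrative; substitute the NIC's PCI address. */
        FILE *f = fopen("/sys/bus/pci/devices/0000:03:00.0/mcdi_logging", "w");

        if (!f)
                return 1;
        fputs("1\n", f);        /* any first character other than '0' enables */
        return fclose(f) ? 1 : 0;
}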
static int efx_register_netdev(struct efx_nic *efx)
{
struct net_device *net_dev = efx->net_dev;
@@ -2383,9 +2408,21 @@ static int efx_register_netdev(struct efx_nic *efx)
"failed to init net dev attributes\n");
goto fail_registered;
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ rc = device_create_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+ if (rc) {
+ netif_err(efx, drv, efx->net_dev,
+ "failed to init net dev attributes\n");
+ goto fail_attr_mcdi_logging;
+ }
+#endif
return 0;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+fail_attr_mcdi_logging:
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+#endif
fail_registered:
rtnl_lock();
efx_dissociate(efx);
@@ -2404,13 +2441,14 @@ static void efx_unregister_netdev(struct efx_nic *efx)
BUG_ON(netdev_priv(efx->net_dev) != efx);
- strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
- device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
-
- rtnl_lock();
- unregister_netdevice(efx->net_dev);
- efx->state = STATE_UNINIT;
- rtnl_unlock();
+ if (efx_dev_registered(efx)) {
+ strlcpy(efx->name, pci_name(efx->pci_dev), sizeof(efx->name));
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_mcdi_logging);
+#endif
+ device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_type);
+ unregister_netdev(efx->net_dev);
+ }
}
/**************************************************************************
@@ -2866,7 +2904,8 @@ static void efx_pci_remove_main(struct efx_nic *efx)
}
/* Final NIC shutdown
- * This is called only at module unload (or hotplug removal).
+ * This is called only at module unload (or hotplug removal). A PF can call
+ * this on its VFs to ensure they are unbound first.
*/
static void efx_pci_remove(struct pci_dev *pci_dev)
{
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 8267a1c..81640f8 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -8,6 +8,7 @@
*/
#include <linux/delay.h>
+#include <linux/moduleparam.h>
#include <asm/cmpxchg.h>
#include "net_driver.h"
#include "nic.h"
@@ -54,18 +55,32 @@ static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
static bool efx_mcdi_poll_once(struct efx_nic *efx);
static void efx_mcdi_abandon(struct efx_nic *efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+static bool mcdi_logging_default;
+module_param(mcdi_logging_default, bool, 0644);
+MODULE_PARM_DESC(mcdi_logging_default,
+ "Enable MCDI logging on newly-probed functions");
+#endif
+
int efx_mcdi_init(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi;
bool already_attached;
- int rc;
+ int rc = -ENOMEM;
efx->mcdi = kzalloc(sizeof(*efx->mcdi), GFP_KERNEL);
if (!efx->mcdi)
- return -ENOMEM;
+ goto fail;
mcdi = efx_mcdi(efx);
mcdi->efx = efx;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ /* consuming code assumes buffer is page-sized */
+ mcdi->logging_buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!mcdi->logging_buffer)
+ goto fail1;
+ mcdi->logging_enabled = mcdi_logging_default;
+#endif
init_waitqueue_head(&mcdi->wq);
spin_lock_init(&mcdi->iface_lock);
mcdi->state = MCDI_STATE_QUIESCENT;
@@ -81,7 +96,7 @@ int efx_mcdi_init(struct efx_nic *efx)
/* Recover from a failed assertion before probing */
rc = efx_mcdi_handle_assertion(efx);
if (rc)
- return rc;
+ goto fail2;
/* Let the MC (and BMC, if this is a LOM) know that the driver
* is loaded. We should do this before we reset the NIC.
@@ -90,7 +105,7 @@ int efx_mcdi_init(struct efx_nic *efx)
if (rc) {
netif_err(efx, probe, efx->net_dev,
"Unable to register driver with MCPU\n");
- return rc;
+ goto fail2;
}
if (already_attached)
/* Not a fatal error */
@@ -102,6 +117,15 @@ int efx_mcdi_init(struct efx_nic *efx)
efx->primary = efx;
return 0;
+fail2:
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ free_page((unsigned long)mcdi->logging_buffer);
+fail1:
+#endif
+ kfree(efx->mcdi);
+ efx->mcdi = NULL;
+fail:
+ return rc;
}
void efx_mcdi_fini(struct efx_nic *efx)
@@ -114,6 +138,10 @@ void efx_mcdi_fini(struct efx_nic *efx)
/* Relinquish the device (back to the BMC, if this is a LOM) */
efx_mcdi_drv_attach(efx, false, NULL);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ free_page((unsigned long)efx->mcdi->iface.logging_buffer);
+#endif
+
kfree(efx->mcdi);
}
@@ -121,6 +149,9 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
const efx_dword_t *inbuf, size_t inlen)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
efx_dword_t hdr[2];
size_t hdr_len;
u32 xflags, seqno;
@@ -165,6 +196,31 @@ static void efx_mcdi_send_request(struct efx_nic *efx, unsigned cmd,
hdr_len = 8;
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+ int bytes = 0;
+ int i;
+ /* Lengths should always be a whole number of dwords, so scream
+ * if they're not.
+ */
+ WARN_ON_ONCE(hdr_len % 4);
+ WARN_ON_ONCE(inlen % 4);
+
+ /* We own the logging buffer, as only one MCDI can be in
+ * progress on a NIC at any one time. So no need for locking.
+ */
+ for (i = 0; i < hdr_len / 4 && bytes < PAGE_SIZE; i++)
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr[i].u32[0]));
+
+ for (i = 0; i < inlen / 4 && bytes < PAGE_SIZE; i++)
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(inbuf[i].u32[0]));
+
+ netif_info(efx, hw, efx->net_dev, "MCDI RPC REQ:%s\n", buf);
+ }
+#endif
+
efx->type->mcdi_request(efx, hdr, hdr_len, inbuf, inlen);
mcdi->new_epoch = false;
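The bounded snprintf() loop above, and its twin in the response path below, can be exercised in isolation. A minimal self-contained sketch; the buffer size and sample dwords are illustrative only:

#include <stdio.h>
#include <stdint.h>

#define LOGBUF_SZ 64                    /* stands in for PAGE_SIZE */

/* Append dwords as " %08x" tokens, stopping once the buffer is full,
 * mirroring the request/response tracing loops. */
static int hexdump_dwords(char *buf, int bufsz, const uint32_t *w, int n)
{
        int bytes = 0, i;

        for (i = 0; i < n && bytes < bufsz; i++)
                bytes += snprintf(buf + bytes, bufsz - bytes, " %08x", w[i]);
        return bytes;
}

int main(void)
{
        uint32_t hdr[2] = { 0x00000043, 0x00c0ffee };   /* sample values */
        char line[LOGBUF_SZ];

        hexdump_dwords(line, sizeof(line), hdr, 2);
        printf("MCDI RPC REQ:%s\n", line);
        return 0;
}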
@@ -206,6 +262,9 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
unsigned int respseq, respcmd, error;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *buf = mcdi->logging_buffer; /* page-sized */
+#endif
efx_dword_t hdr;
efx->type->mcdi_read_response(efx, &hdr, 0, 4);
@@ -223,6 +282,39 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
EFX_DWORD_FIELD(hdr, MC_CMD_V2_EXTN_IN_ACTUAL_LEN);
}
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ if (mcdi->logging_enabled && !WARN_ON_ONCE(!buf)) {
+ size_t hdr_len, data_len;
+ int bytes = 0;
+ int i;
+
+ WARN_ON_ONCE(mcdi->resp_hdr_len % 4);
+ hdr_len = mcdi->resp_hdr_len / 4;
+ /* MCDI_DECLARE_BUF ensures that the underlying buffer is padded
+ * to dword size, and the MCDI buffer is always a whole number of dwords
+ */
+ data_len = DIV_ROUND_UP(mcdi->resp_data_len, 4);
+
+ /* We own the logging buffer, as only one MCDI can be in
+ * progress on a NIC at any one time. So no need for locking.
+ */
+ for (i = 0; i < hdr_len && bytes < PAGE_SIZE; i++) {
+ efx->type->mcdi_read_response(efx, &hdr, (i * 4), 4);
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr.u32[0]));
+ }
+
+ for (i = 0; i < data_len && bytes < PAGE_SIZE; i++) {
+ efx->type->mcdi_read_response(efx, &hdr,
+ mcdi->resp_hdr_len + (i * 4), 4);
+ bytes += snprintf(buf + bytes, PAGE_SIZE - bytes,
+ " %08x", le32_to_cpu(hdr.u32[0]));
+ }
+
+ netif_info(efx, hw, efx->net_dev, "MCDI RPC RESP:%s\n", buf);
+ }
+#endif
+
if (error && mcdi->resp_data_len == 0) {
netif_err(efx, hw, efx->net_dev, "MC rebooted\n");
mcdi->resprc = -EIO;
diff --git a/drivers/net/ethernet/sfc/mcdi.h b/drivers/net/ethernet/sfc/mcdi.h
index 7afab2f..1838afe 100644
--- a/drivers/net/ethernet/sfc/mcdi.h
+++ b/drivers/net/ethernet/sfc/mcdi.h
@@ -58,6 +58,8 @@ enum efx_mcdi_mode {
* enabled
* @async_list: Queue of asynchronous requests
* @async_timer: Timer for asynchronous request timeout
+ * @logging_buffer: buffer that may be used to build MCDI tracing messages
+ * @logging_enabled: whether to trace MCDI
*/
struct efx_mcdi_iface {
struct efx_nic *efx;
@@ -74,6 +76,10 @@ struct efx_mcdi_iface {
spinlock_t async_lock;
struct list_head async_list;
struct timer_list async_timer;
+#ifdef CONFIG_SFC_MCDI_LOGGING
+ char *logging_buffer;
+ bool logging_enabled;
+#endif
};
struct efx_mcdi_mon {
diff --git a/drivers/net/ethernet/sfc/mcdi_pcol.h b/drivers/net/ethernet/sfc/mcdi_pcol.h
index 9efdf0a..45fca9f 100644
--- a/drivers/net/ethernet/sfc/mcdi_pcol.h
+++ b/drivers/net/ethernet/sfc/mcdi_pcol.h
@@ -2755,7 +2755,7 @@
#define MC_CMD_0x2e_PRIVILEGE_CTG SRIOV_CTG_GENERAL
/* MC_CMD_MAC_STATS_IN msgrequest */
-#define MC_CMD_MAC_STATS_IN_LEN 16
+#define MC_CMD_MAC_STATS_IN_LEN 20
/* ??? */
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_OFST 0
#define MC_CMD_MAC_STATS_IN_DMA_ADDR_LEN 8
@@ -2777,6 +2777,8 @@
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_LBN 16
#define MC_CMD_MAC_STATS_IN_PERIOD_MS_WIDTH 16
#define MC_CMD_MAC_STATS_IN_DMA_LEN_OFST 12
+/* port id so vadapter stats can be provided */
+#define MC_CMD_MAC_STATS_IN_PORT_ID_OFST 16
/* MC_CMD_MAC_STATS_OUT_DMA msgresponse */
#define MC_CMD_MAC_STATS_OUT_DMA_LEN 0
@@ -2891,11 +2893,31 @@
/* enum: RXDP counter: Number of times an emergency descriptor fetch was
* performed. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_EMERGENCY_FETCH_CONDITIONS 0x47
+#define MC_CMD_MAC_RXDP_HLB_FETCH_CONDITIONS 0x47
/* enum: RXDP counter: Number of times the DPCPU waited for an existing
* descriptor fetch. Valid for EF10 with PM_AND_RXDP_COUNTERS capability only.
*/
-#define MC_CMD_MAC_RXDP_EMERGENCY_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_RXDP_HLB_WAIT_CONDITIONS 0x48
+#define MC_CMD_MAC_VADAPTER_RX_DMABUF_START 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_PACKETS 0x4c /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_UNICAST_BYTES 0x4d /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_PACKETS 0x4e /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_MULTICAST_BYTES 0x4f /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_PACKETS 0x50 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BROADCAST_BYTES 0x51 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_PACKETS 0x52 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_BAD_BYTES 0x53 /* enum */
+#define MC_CMD_MAC_VADAPTER_RX_OVERFLOW 0x54 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_DMABUF_START 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_PACKETS 0x57 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_UNICAST_BYTES 0x58 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_PACKETS 0x59 /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_MULTICAST_BYTES 0x5a /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_PACKETS 0x5b /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BROADCAST_BYTES 0x5c /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_PACKETS 0x5d /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_BAD_BYTES 0x5e /* enum */
+#define MC_CMD_MAC_VADAPTER_TX_OVERFLOW 0x5f /* enum */
/* enum: Start of GMAC stats buffer space, for Siena only. */
#define MC_CMD_GMAC_DMABUF_START 0x40
/* enum: End of GMAC stats buffer space, for Siena only. */
@@ -5578,6 +5600,8 @@
#define MC_CMD_GET_CAPABILITIES_OUT_MCAST_FILTER_CHAINING_WIDTH 1
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN 27
#define MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_WIDTH 1
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN 30
+#define MC_CMD_GET_CAPABILITIES_OUT_EVB_WIDTH 1
/* RxDPCPU firmware id. */
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_OFST 4
#define MC_CMD_GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID_LEN 2
diff --git a/drivers/net/ethernet/sfc/mcdi_port.c b/drivers/net/ethernet/sfc/mcdi_port.c
index 9bf04cb..7f295c4 100644
--- a/drivers/net/ethernet/sfc/mcdi_port.c
+++ b/drivers/net/ethernet/sfc/mcdi_port.c
@@ -924,6 +924,7 @@ enum efx_stats_action {
static int efx_mcdi_mac_stats(struct efx_nic *efx,
enum efx_stats_action action, int clear)
{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
int rc;
int change = action == EFX_STATS_PULL ? 0 : 1;
@@ -945,9 +946,14 @@ static int efx_mcdi_mac_stats(struct efx_nic *efx,
MAC_STATS_IN_PERIODIC_NOEVENT, 1,
MAC_STATS_IN_PERIOD_MS, period);
MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
-
- rc = efx_mcdi_rpc(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
- NULL, 0, NULL);
+ MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, nic_data->vport_id);
+
+ rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
+ NULL, 0, NULL);
+ /* Expect ENOENT if DMA queues have not been set up */
+ if (rc && (rc != -ENOENT || atomic_read(&efx->active_queues)))
+ efx_mcdi_display_error(efx, MC_CMD_MAC_STATS, sizeof(inbuf),
+ NULL, 0, rc);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index a468a22..d72f522 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1350,6 +1350,8 @@ struct efx_nic_type {
struct ifla_vf_info *ivi);
int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i,
int link_state);
+ int (*sriov_get_phys_port_id)(struct efx_nic *efx,
+ struct netdev_phys_item_id *ppid);
int (*vswitching_probe)(struct efx_nic *efx);
int (*vswitching_restore)(struct efx_nic *efx);
void (*vswitching_remove)(struct efx_nic *efx);
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index db8562e..31ff908 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -407,59 +407,77 @@ struct siena_nic_data {
};
enum {
- EF10_STAT_tx_bytes = GENERIC_STAT_COUNT,
- EF10_STAT_tx_packets,
- EF10_STAT_tx_pause,
- EF10_STAT_tx_control,
- EF10_STAT_tx_unicast,
- EF10_STAT_tx_multicast,
- EF10_STAT_tx_broadcast,
- EF10_STAT_tx_lt64,
- EF10_STAT_tx_64,
- EF10_STAT_tx_65_to_127,
- EF10_STAT_tx_128_to_255,
- EF10_STAT_tx_256_to_511,
- EF10_STAT_tx_512_to_1023,
- EF10_STAT_tx_1024_to_15xx,
- EF10_STAT_tx_15xx_to_jumbo,
- EF10_STAT_rx_bytes,
- EF10_STAT_rx_bytes_minus_good_bytes,
- EF10_STAT_rx_good_bytes,
- EF10_STAT_rx_bad_bytes,
- EF10_STAT_rx_packets,
- EF10_STAT_rx_good,
- EF10_STAT_rx_bad,
- EF10_STAT_rx_pause,
- EF10_STAT_rx_control,
+ EF10_STAT_port_tx_bytes = GENERIC_STAT_COUNT,
+ EF10_STAT_port_tx_packets,
+ EF10_STAT_port_tx_pause,
+ EF10_STAT_port_tx_control,
+ EF10_STAT_port_tx_unicast,
+ EF10_STAT_port_tx_multicast,
+ EF10_STAT_port_tx_broadcast,
+ EF10_STAT_port_tx_lt64,
+ EF10_STAT_port_tx_64,
+ EF10_STAT_port_tx_65_to_127,
+ EF10_STAT_port_tx_128_to_255,
+ EF10_STAT_port_tx_256_to_511,
+ EF10_STAT_port_tx_512_to_1023,
+ EF10_STAT_port_tx_1024_to_15xx,
+ EF10_STAT_port_tx_15xx_to_jumbo,
+ EF10_STAT_port_rx_bytes,
+ EF10_STAT_port_rx_bytes_minus_good_bytes,
+ EF10_STAT_port_rx_good_bytes,
+ EF10_STAT_port_rx_bad_bytes,
+ EF10_STAT_port_rx_packets,
+ EF10_STAT_port_rx_good,
+ EF10_STAT_port_rx_bad,
+ EF10_STAT_port_rx_pause,
+ EF10_STAT_port_rx_control,
+ EF10_STAT_port_rx_unicast,
+ EF10_STAT_port_rx_multicast,
+ EF10_STAT_port_rx_broadcast,
+ EF10_STAT_port_rx_lt64,
+ EF10_STAT_port_rx_64,
+ EF10_STAT_port_rx_65_to_127,
+ EF10_STAT_port_rx_128_to_255,
+ EF10_STAT_port_rx_256_to_511,
+ EF10_STAT_port_rx_512_to_1023,
+ EF10_STAT_port_rx_1024_to_15xx,
+ EF10_STAT_port_rx_15xx_to_jumbo,
+ EF10_STAT_port_rx_gtjumbo,
+ EF10_STAT_port_rx_bad_gtjumbo,
+ EF10_STAT_port_rx_overflow,
+ EF10_STAT_port_rx_align_error,
+ EF10_STAT_port_rx_length_error,
+ EF10_STAT_port_rx_nodesc_drops,
+ EF10_STAT_port_rx_pm_trunc_bb_overflow,
+ EF10_STAT_port_rx_pm_discard_bb_overflow,
+ EF10_STAT_port_rx_pm_trunc_vfifo_full,
+ EF10_STAT_port_rx_pm_discard_vfifo_full,
+ EF10_STAT_port_rx_pm_trunc_qbb,
+ EF10_STAT_port_rx_pm_discard_qbb,
+ EF10_STAT_port_rx_pm_discard_mapping,
+ EF10_STAT_port_rx_dp_q_disabled_packets,
+ EF10_STAT_port_rx_dp_di_dropped_packets,
+ EF10_STAT_port_rx_dp_streaming_packets,
+ EF10_STAT_port_rx_dp_hlb_fetch,
+ EF10_STAT_port_rx_dp_hlb_wait,
EF10_STAT_rx_unicast,
+ EF10_STAT_rx_unicast_bytes,
EF10_STAT_rx_multicast,
+ EF10_STAT_rx_multicast_bytes,
EF10_STAT_rx_broadcast,
- EF10_STAT_rx_lt64,
- EF10_STAT_rx_64,
- EF10_STAT_rx_65_to_127,
- EF10_STAT_rx_128_to_255,
- EF10_STAT_rx_256_to_511,
- EF10_STAT_rx_512_to_1023,
- EF10_STAT_rx_1024_to_15xx,
- EF10_STAT_rx_15xx_to_jumbo,
- EF10_STAT_rx_gtjumbo,
- EF10_STAT_rx_bad_gtjumbo,
+ EF10_STAT_rx_broadcast_bytes,
+ EF10_STAT_rx_bad,
+ EF10_STAT_rx_bad_bytes,
EF10_STAT_rx_overflow,
- EF10_STAT_rx_align_error,
- EF10_STAT_rx_length_error,
- EF10_STAT_rx_nodesc_drops,
- EF10_STAT_rx_pm_trunc_bb_overflow,
- EF10_STAT_rx_pm_discard_bb_overflow,
- EF10_STAT_rx_pm_trunc_vfifo_full,
- EF10_STAT_rx_pm_discard_vfifo_full,
- EF10_STAT_rx_pm_trunc_qbb,
- EF10_STAT_rx_pm_discard_qbb,
- EF10_STAT_rx_pm_discard_mapping,
- EF10_STAT_rx_dp_q_disabled_packets,
- EF10_STAT_rx_dp_di_dropped_packets,
- EF10_STAT_rx_dp_streaming_packets,
- EF10_STAT_rx_dp_hlb_fetch,
- EF10_STAT_rx_dp_hlb_wait,
+ EF10_STAT_tx_unicast,
+ EF10_STAT_tx_unicast_bytes,
+ EF10_STAT_tx_multicast,
+ EF10_STAT_tx_multicast_bytes,
+ EF10_STAT_tx_broadcast,
+ EF10_STAT_tx_broadcast_bytes,
+ EF10_STAT_tx_bad,
+ EF10_STAT_tx_bad_bytes,
+ EF10_STAT_tx_overflow,
EF10_STAT_COUNT
};
@@ -524,6 +542,7 @@ struct efx_ef10_nic_data {
unsigned int vport_id;
bool must_probe_vswitching;
unsigned int pf_index;
+ u8 port_id[ETH_ALEN];
#ifdef CONFIG_SFC_SRIOV
unsigned int vf_index;
struct ef10_vf *vf;
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index c0ad95d..809ea461 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -224,12 +224,17 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
}
}
-static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+ struct efx_rx_buffer *rx_buf,
+ unsigned int num_bufs)
{
- if (rx_buf->page) {
- put_page(rx_buf->page);
- rx_buf->page = NULL;
- }
+ do {
+ if (rx_buf->page) {
+ put_page(rx_buf->page);
+ rx_buf->page = NULL;
+ }
+ rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+ } while (--num_bufs);
}
/* Attempt to recycle the page if there is an RX recycle ring; the page can
@@ -278,7 +283,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
/* If this is the last buffer in a page, unmap and free it. */
if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
- efx_free_rx_buffer(rx_buf);
+ efx_free_rx_buffers(rx_queue, rx_buf, 1);
}
rx_buf->page = NULL;
}
@@ -304,10 +309,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
efx_recycle_rx_pages(channel, rx_buf, n_frags);
- do {
- efx_free_rx_buffer(rx_buf);
- rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
- } while (--n_frags);
+ efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
}
/**
@@ -431,11 +433,10 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
skb = napi_get_frags(napi);
if (unlikely(!skb)) {
- while (n_frags--) {
- put_page(rx_buf->page);
- rx_buf->page = NULL;
- rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
- }
+ struct efx_rx_queue *rx_queue;
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+ efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
return;
}
@@ -622,7 +623,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
if (unlikely(skb == NULL)) {
- efx_free_rx_buffer(rx_buf);
+ struct efx_rx_queue *rx_queue;
+
+ rx_queue = efx_channel_get_rx_queue(channel);
+ efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
return;
}
skb_record_rx_queue(skb, channel->rx_queue.core_index);
@@ -661,8 +665,12 @@ void __efx_rx_packet(struct efx_channel *channel)
* loopback layer, and free the rx_buf here
*/
if (unlikely(efx->loopback_selftest)) {
+ struct efx_rx_queue *rx_queue;
+
efx_loopback_rx_packet(efx, eh, rx_buf->len);
- efx_free_rx_buffer(rx_buf);
+ rx_queue = efx_channel_get_rx_queue(channel);
+ efx_free_rx_buffers(rx_queue, rx_buf,
+ channel->rx_pkt_n_frags);
goto out;
}
diff --git a/drivers/net/ethernet/sfc/sriov.c b/drivers/net/ethernet/sfc/sriov.c
index 6c5edbd..816c446 100644
--- a/drivers/net/ethernet/sfc/sriov.c
+++ b/drivers/net/ethernet/sfc/sriov.c
@@ -70,3 +70,14 @@ int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
else
return -EOPNOTSUPP;
}
+
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ if (efx->type->sriov_get_phys_port_id)
+ return efx->type->sriov_get_phys_port_id(efx, ppid);
+ else
+ return -EOPNOTSUPP;
+}
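This wrapper is what .ndo_get_phys_port_id resolves to; the networking core consumes it through dev_get_phys_port_id() when filling IFLA_PHYS_PORT_ID for RTM_GETLINK. An illustrative in-kernel caller, not part of this patch (assumes <linux/netdevice.h> and <linux/printk.h>):

/* Illustrative only: fetch and dump a device's physical port id,
 * the same path rtnetlink takes to fill IFLA_PHYS_PORT_ID. */
static void dump_phys_port_id(struct net_device *dev)
{
        struct netdev_phys_item_id ppid;

        if (dev_get_phys_port_id(dev, &ppid) == 0)
                print_hex_dump_bytes("phys_port_id: ", DUMP_PREFIX_NONE,
                                     ppid.id, ppid.id_len);
}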
diff --git a/drivers/net/ethernet/sfc/sriov.h b/drivers/net/ethernet/sfc/sriov.h
index 3be15a5..400df52 100644
--- a/drivers/net/ethernet/sfc/sriov.h
+++ b/drivers/net/ethernet/sfc/sriov.h
@@ -23,6 +23,8 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
struct ifla_vf_info *ivi);
int efx_sriov_set_vf_link_state(struct net_device *net_dev, int vf_i,
int link_state);
+int efx_sriov_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid);
#endif /* CONFIG_SFC_SRIOV */
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 731e045..cec147d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -16,6 +16,7 @@ if STMMAC_ETH
config STMMAC_PLATFORM
tristate "STMMAC Platform bus support"
depends on STMMAC_ETH
+ select MFD_SYSCON
default y
---help---
This selects the platform specific bus support for the stmmac driver.
@@ -36,6 +37,19 @@ config DWMAC_GENERIC
platform specific code to function or is using platform
data for setup.
+config DWMAC_IPQ806X
+ tristate "QCA IPQ806x DWMAC support"
+ default ARCH_QCOM
+ depends on OF
+ select MFD_SYSCON
+ help
+ Support for QCA IPQ806X DWMAC Ethernet.
+
+ This selects the IPQ806x SoC glue layer support for the stmmac
+ device driver. This driver does not use any of the hardware
+ acceleration features available on this SoC. Network devices
+ will behave like standard non-accelerated ethernet interfaces.
+
config DWMAC_LPC18XX
tristate "NXP LPC18xx/43xx DWMAC support"
default ARCH_LPC18XX
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index 92e714a..b390161 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -6,6 +6,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
# Ordering matters. Generic driver must be last.
obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o
+obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o
obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o
obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o
obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
new file mode 100644
index 0000000..7e3129e
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -0,0 +1,365 @@
+/*
+ * Qualcomm Atheros IPQ806x GMAC glue layer
+ *
+ * Copyright (C) 2015 The Linux Foundation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+#include <linux/of_net.h>
+#include <linux/mfd/syscon.h>
+#include <linux/stmmac.h>
+#include <linux/of_mdio.h>
+#include <linux/module.h>
+
+#include "stmmac_platform.h"
+
+#define NSS_COMMON_CLK_GATE 0x8
+#define NSS_COMMON_CLK_GATE_PTP_EN(x) BIT(0x10 + x)
+#define NSS_COMMON_CLK_GATE_RGMII_RX_EN(x) BIT(0x9 + (x * 2))
+#define NSS_COMMON_CLK_GATE_RGMII_TX_EN(x) BIT(0x8 + (x * 2))
+#define NSS_COMMON_CLK_GATE_GMII_RX_EN(x) BIT(0x4 + x)
+#define NSS_COMMON_CLK_GATE_GMII_TX_EN(x) BIT(0x0 + x)
+
+#define NSS_COMMON_CLK_DIV0 0xC
+#define NSS_COMMON_CLK_DIV_OFFSET(x) (x * 8)
+#define NSS_COMMON_CLK_DIV_MASK 0x7f
+
+#define NSS_COMMON_CLK_SRC_CTRL 0x14
+#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x)
+/* The mode is encoded in one bit, but its meaning depends on the MAC ID:
+ * MAC0: QSGMII=0 RGMII=1
+ * MAC1: QSGMII=0 SGMII=0 RGMII=1
+ * MAC2 & MAC3: QSGMII=0 SGMII=1
+ */
+#define NSS_COMMON_CLK_SRC_CTRL_RGMII(x) 1
+#define NSS_COMMON_CLK_SRC_CTRL_SGMII(x) ((x >= 2) ? 1 : 0)
+
+#define NSS_COMMON_MACSEC_CTL 0x28
+#define NSS_COMMON_MACSEC_CTL_EXT_BYPASS_EN(x) (1 << x)
+
+#define NSS_COMMON_GMAC_CTL(x) (0x30 + (x * 4))
+#define NSS_COMMON_GMAC_CTL_CSYS_REQ BIT(19)
+#define NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL BIT(16)
+#define NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET 8
+#define NSS_COMMON_GMAC_CTL_IFG_OFFSET 0
+#define NSS_COMMON_GMAC_CTL_IFG_MASK 0x3f
+
+#define NSS_COMMON_CLK_DIV_RGMII_1000 1
+#define NSS_COMMON_CLK_DIV_RGMII_100 9
+#define NSS_COMMON_CLK_DIV_RGMII_10 99
+#define NSS_COMMON_CLK_DIV_SGMII_1000 0
+#define NSS_COMMON_CLK_DIV_SGMII_100 4
+#define NSS_COMMON_CLK_DIV_SGMII_10 49
+
+#define QSGMII_PCS_MODE_CTL 0x68
+#define QSGMII_PCS_MODE_CTL_AUTONEG_EN(x) BIT((x * 8) + 7)
+
+#define QSGMII_PCS_CAL_LCKDT_CTL 0x120
+#define QSGMII_PCS_CAL_LCKDT_CTL_RST BIT(19)
+
+/* Only GMAC1/2/3 support SGMII and their CTL registers are not contiguous */
+#define QSGMII_PHY_SGMII_CTL(x) ((x == 1) ? 0x134 : \
+ (0x13c + (4 * (x - 2))))
+#define QSGMII_PHY_CDR_EN BIT(0)
+#define QSGMII_PHY_RX_FRONT_EN BIT(1)
+#define QSGMII_PHY_RX_SIGNAL_DETECT_EN BIT(2)
+#define QSGMII_PHY_TX_DRIVER_EN BIT(3)
+#define QSGMII_PHY_QSGMII_EN BIT(7)
+#define QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET 12
+#define QSGMII_PHY_PHASE_LOOP_GAIN_MASK 0x7
+#define QSGMII_PHY_RX_DC_BIAS_OFFSET 18
+#define QSGMII_PHY_RX_DC_BIAS_MASK 0x3
+#define QSGMII_PHY_RX_INPUT_EQU_OFFSET 20
+#define QSGMII_PHY_RX_INPUT_EQU_MASK 0x3
+#define QSGMII_PHY_CDR_PI_SLEW_OFFSET 22
+#define QSGMII_PHY_CDR_PI_SLEW_MASK 0x3
+#define QSGMII_PHY_TX_DRV_AMP_OFFSET 28
+#define QSGMII_PHY_TX_DRV_AMP_MASK 0xf
+
+struct ipq806x_gmac {
+ struct platform_device *pdev;
+ struct regmap *nss_common;
+ struct regmap *qsgmii_csr;
+ uint32_t id;
+ struct clk *core_clk;
+ phy_interface_t phy_mode;
+};
+
+static int get_clk_div_sgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ struct device *dev = &gmac->pdev->dev;
+ int div;
+
+ switch (speed) {
+ case SPEED_1000:
+ div = NSS_COMMON_CLK_DIV_SGMII_1000;
+ break;
+
+ case SPEED_100:
+ div = NSS_COMMON_CLK_DIV_SGMII_100;
+ break;
+
+ case SPEED_10:
+ div = NSS_COMMON_CLK_DIV_SGMII_10;
+ break;
+
+ default:
+ dev_err(dev, "Speed %dMbps not supported in SGMII\n", speed);
+ return -EINVAL;
+ }
+
+ return div;
+}
+
+static int get_clk_div_rgmii(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ struct device *dev = &gmac->pdev->dev;
+ int div;
+
+ switch (speed) {
+ case SPEED_1000:
+ div = NSS_COMMON_CLK_DIV_RGMII_1000;
+ break;
+
+ case SPEED_100:
+ div = NSS_COMMON_CLK_DIV_RGMII_100;
+ break;
+
+ case SPEED_10:
+ div = NSS_COMMON_CLK_DIV_RGMII_10;
+ break;
+
+ default:
+ dev_err(dev, "Speed %dMbps not supported in RGMII\n", speed);
+ return -EINVAL;
+ }
+
+ return div;
+}
+
+static int ipq806x_gmac_set_speed(struct ipq806x_gmac *gmac, unsigned int speed)
+{
+ uint32_t clk_bits, val;
+ int div;
+
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ div = get_clk_div_rgmii(gmac, speed);
+ clk_bits = NSS_COMMON_CLK_GATE_RGMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_RGMII_TX_EN(gmac->id);
+ break;
+
+ case PHY_INTERFACE_MODE_SGMII:
+ div = get_clk_div_sgmii(gmac, speed);
+ clk_bits = NSS_COMMON_CLK_GATE_GMII_RX_EN(gmac->id) |
+ NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id);
+ break;
+
+ default:
+ dev_err(&gmac->pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ return -EINVAL;
+ }
+
+ /* Disable the clocks */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val &= ~clk_bits;
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ /* Set the divider */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_DIV0, &val);
+ val &= ~(NSS_COMMON_CLK_DIV_MASK
+ << NSS_COMMON_CLK_DIV_OFFSET(gmac->id));
+ val |= div << NSS_COMMON_CLK_DIV_OFFSET(gmac->id);
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_DIV0, val);
+
+ /* Re-enable the clocks */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val |= clk_bits;
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ return 0;
+}
+
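Each of the three read-modify-write sequences above could also be expressed with regmap_update_bits(), which performs the read, mask and write under the regmap's own locking. An untested sketch against the same nss_common handle; the function name is illustrative:

/* Sketch only: gate off, set divider, gate back on via
 * regmap_update_bits(); not part of the submitted driver. */
static void ipq806x_gmac_apply_div(struct ipq806x_gmac *gmac,
                                   u32 clk_bits, u32 div)
{
        u32 mask = NSS_COMMON_CLK_DIV_MASK
                   << NSS_COMMON_CLK_DIV_OFFSET(gmac->id);

        regmap_update_bits(gmac->nss_common, NSS_COMMON_CLK_GATE,
                           clk_bits, 0);
        regmap_update_bits(gmac->nss_common, NSS_COMMON_CLK_DIV0, mask,
                           div << NSS_COMMON_CLK_DIV_OFFSET(gmac->id));
        regmap_update_bits(gmac->nss_common, NSS_COMMON_CLK_GATE,
                           clk_bits, clk_bits);
}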
+static void *ipq806x_gmac_of_parse(struct ipq806x_gmac *gmac)
+{
+ struct device *dev = &gmac->pdev->dev;
+
+ gmac->phy_mode = of_get_phy_mode(dev->of_node);
+ if (gmac->phy_mode < 0) {
+ dev_err(dev, "missing phy mode property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (of_property_read_u32(dev->of_node, "qcom,id", &gmac->id) < 0) {
+ dev_err(dev, "missing qcom id property\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* The GMACs are called 1 to 4 in the documentation, but to simplify the
+ * code and keep it consistent with the Linux convention, we'll number
+ * them from 0 to 3 here.
+ */
+ if (gmac->id < 0 || gmac->id > 3) {
+ dev_err(dev, "invalid gmac id\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ gmac->core_clk = devm_clk_get(dev, "stmmaceth");
+ if (IS_ERR(gmac->core_clk)) {
+ dev_err(dev, "missing stmmaceth clk property\n");
+ return gmac->core_clk;
+ }
+ clk_set_rate(gmac->core_clk, 266000000);
+
+ /* Setup the register map for the nss common registers */
+ gmac->nss_common = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "qcom,nss-common");
+ if (IS_ERR(gmac->nss_common)) {
+ dev_err(dev, "missing nss-common node\n");
+ return gmac->nss_common;
+ }
+
+ /* Setup the register map for the qsgmii csr registers */
+ gmac->qsgmii_csr = syscon_regmap_lookup_by_phandle(dev->of_node,
+ "qcom,qsgmii-csr");
+ if (IS_ERR(gmac->qsgmii_csr)) {
+ dev_err(dev, "missing qsgmii-csr node\n");
+ return gmac->qsgmii_csr;
+ }
+
+ return NULL;
+}
+
+static void *ipq806x_gmac_setup(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct ipq806x_gmac *gmac;
+ int val;
+ void *err;
+
+ gmac = devm_kzalloc(dev, sizeof(*gmac), GFP_KERNEL);
+ if (!gmac)
+ return ERR_PTR(-ENOMEM);
+
+ gmac->pdev = pdev;
+
+ err = ipq806x_gmac_of_parse(gmac);
+ if (err) {
+ dev_err(dev, "device tree parsing error\n");
+ return err;
+ }
+
+ regmap_write(gmac->qsgmii_csr, QSGMII_PCS_CAL_LCKDT_CTL,
+ QSGMII_PCS_CAL_LCKDT_CTL_RST);
+
+ /* Inter frame gap is set to 12 */
+ val = 12 << NSS_COMMON_GMAC_CTL_IFG_OFFSET |
+ 12 << NSS_COMMON_GMAC_CTL_IFG_LIMIT_OFFSET;
+ /* We also initiate an AXI low power exit request */
+ val |= NSS_COMMON_GMAC_CTL_CSYS_REQ;
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL;
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ return NULL;
+ }
+ regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val);
+
+ /* Configure the clock src according to the mode */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
+ val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ switch (gmac->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
+ NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ break;
+ case PHY_INTERFACE_MODE_SGMII:
+ val |= NSS_COMMON_CLK_SRC_CTRL_SGMII(gmac->id) <<
+ NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id);
+ break;
+ default:
+ dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n",
+ phy_modes(gmac->phy_mode));
+ return NULL;
+ }
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val);
+
+ /* Enable PTP clock */
+ regmap_read(gmac->nss_common, NSS_COMMON_CLK_GATE, &val);
+ val |= NSS_COMMON_CLK_GATE_PTP_EN(gmac->id);
+ regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val);
+
+ if (gmac->phy_mode == PHY_INTERFACE_MODE_SGMII) {
+ regmap_write(gmac->qsgmii_csr, QSGMII_PHY_SGMII_CTL(gmac->id),
+ QSGMII_PHY_CDR_EN |
+ QSGMII_PHY_RX_FRONT_EN |
+ QSGMII_PHY_RX_SIGNAL_DETECT_EN |
+ QSGMII_PHY_TX_DRIVER_EN |
+ QSGMII_PHY_QSGMII_EN |
+ 0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
+ 0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET |
+ 0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
+ 0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
+ 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
+ }
+
+ return gmac;
+}
+
+static void ipq806x_gmac_fix_mac_speed(void *priv, unsigned int speed)
+{
+ struct ipq806x_gmac *gmac = priv;
+
+ ipq806x_gmac_set_speed(gmac, speed);
+}
+
+static const struct stmmac_of_data ipq806x_gmac_data = {
+ .has_gmac = 1,
+ .setup = ipq806x_gmac_setup,
+ .fix_mac_speed = ipq806x_gmac_fix_mac_speed,
+};
+
+static const struct of_device_id ipq806x_gmac_dwmac_match[] = {
+ { .compatible = "qcom,ipq806x-gmac", .data = &ipq806x_gmac_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ipq806x_gmac_dwmac_match);
+
+static struct platform_driver ipq806x_gmac_dwmac_driver = {
+ .probe = stmmac_pltfr_probe,
+ .remove = stmmac_pltfr_remove,
+ .driver = {
+ .name = "ipq806x-gmac-dwmac",
+ .pm = &stmmac_pltfr_pm_ops,
+ .of_match_table = ipq806x_gmac_dwmac_match,
+ },
+};
+module_platform_driver(ipq806x_gmac_dwmac_driver);
+
+MODULE_AUTHOR("Mathieu Olivari <mathieu@codeaurora.org>");
+MODULE_DESCRIPTION("Qualcomm Atheros IPQ806x DWMAC specific glue layer");
+MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 9cbcae2..1f3b33a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -125,6 +125,12 @@ struct stmmac_priv {
int use_riwt;
int irq_wake;
spinlock_t ptp_lock;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *dbgfs_dir;
+ struct dentry *dbgfs_rings_status;
+ struct dentry *dbgfs_dma_cap;
+#endif
};
int stmmac_mdio_unregister(struct net_device *ndev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e4f2739..a515673 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -52,6 +52,7 @@
#include "stmmac_ptp.h"
#include "stmmac.h"
#include <linux/reset.h>
+#include <linux/of_mdio.h>
#define STMMAC_ALIGN(x) L1_CACHE_ALIGN(x)
@@ -118,7 +119,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
#ifdef CONFIG_DEBUG_FS
static int stmmac_init_fs(struct net_device *dev);
-static void stmmac_exit_fs(void);
+static void stmmac_exit_fs(struct net_device *dev);
#endif
#define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
@@ -816,18 +817,25 @@ static int stmmac_init_phy(struct net_device *dev)
priv->speed = 0;
priv->oldduplex = -1;
- if (priv->plat->phy_bus_name)
- snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
- priv->plat->phy_bus_name, priv->plat->bus_id);
- else
- snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
- priv->plat->bus_id);
+ if (priv->plat->phy_node) {
+ phydev = of_phy_connect(dev, priv->plat->phy_node,
+ &stmmac_adjust_link, 0, interface);
+ } else {
+ if (priv->plat->phy_bus_name)
+ snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
+ priv->plat->phy_bus_name, priv->plat->bus_id);
+ else
+ snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
+ priv->plat->bus_id);
- snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
- priv->plat->phy_addr);
- pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id_fmt);
+ snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
+ priv->plat->phy_addr);
+ pr_debug("stmmac_init_phy: trying to attach to %s\n",
+ phy_id_fmt);
- phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);
+ phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link,
+ interface);
+ }
if (IS_ERR(phydev)) {
pr_err("%s: Could not attach to PHY\n", dev->name);
@@ -848,7 +856,7 @@ static int stmmac_init_phy(struct net_device *dev)
* device as well.
* Note: phydev->phy_id is the result of reading the UID PHY registers.
*/
- if (phydev->phy_id == 0) {
+ if (!priv->plat->phy_node && phydev->phy_id == 0) {
phy_disconnect(phydev);
return -ENODEV;
}
@@ -1914,7 +1922,7 @@ static int stmmac_release(struct net_device *dev)
netif_carrier_off(dev);
#ifdef CONFIG_DEBUG_FS
- stmmac_exit_fs();
+ stmmac_exit_fs(dev);
#endif
stmmac_release_ptp(priv);
@@ -2506,8 +2514,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
#ifdef CONFIG_DEBUG_FS
static struct dentry *stmmac_fs_dir;
-static struct dentry *stmmac_rings_status;
-static struct dentry *stmmac_dma_cap;
static void sysfs_display_ring(void *head, int size, int extend_desc,
struct seq_file *seq)
@@ -2646,36 +2652,39 @@ static const struct file_operations stmmac_dma_cap_fops = {
static int stmmac_init_fs(struct net_device *dev)
{
- /* Create debugfs entries */
- stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ /* Create per netdev entries */
+ priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
- if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
- pr_err("ERROR %s, debugfs create directory failed\n",
- STMMAC_RESOURCE_NAME);
+ if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
+ pr_err("ERROR %s/%s, debugfs create directory failed\n",
+ STMMAC_RESOURCE_NAME, dev->name);
return -ENOMEM;
}
/* Entry to report DMA RX/TX rings */
- stmmac_rings_status = debugfs_create_file("descriptors_status",
- S_IRUGO, stmmac_fs_dir, dev,
- &stmmac_rings_status_fops);
+ priv->dbgfs_rings_status =
+ debugfs_create_file("descriptors_status", S_IRUGO,
+ priv->dbgfs_dir, dev,
+ &stmmac_rings_status_fops);
- if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
+ if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
pr_info("ERROR creating stmmac ring debugfs file\n");
- debugfs_remove(stmmac_fs_dir);
+ debugfs_remove_recursive(priv->dbgfs_dir);
return -ENOMEM;
}
/* Entry to report the DMA HW features */
- stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
- dev, &stmmac_dma_cap_fops);
+ priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
+ priv->dbgfs_dir,
+ dev, &stmmac_dma_cap_fops);
- if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
+ if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
pr_info("ERROR creating stmmac MMC debugfs file\n");
- debugfs_remove(stmmac_rings_status);
- debugfs_remove(stmmac_fs_dir);
+ debugfs_remove_recursive(priv->dbgfs_dir);
return -ENOMEM;
}
@@ -2683,11 +2692,11 @@ static int stmmac_init_fs(struct net_device *dev)
return 0;
}
-static void stmmac_exit_fs(void)
+static void stmmac_exit_fs(struct net_device *dev)
{
- debugfs_remove(stmmac_rings_status);
- debugfs_remove(stmmac_dma_cap);
- debugfs_remove(stmmac_fs_dir);
+ struct stmmac_priv *priv = netdev_priv(dev);
+
+ debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
@@ -3155,6 +3164,35 @@ err:
__setup("stmmaceth=", stmmac_cmdline_opt);
#endif /* MODULE */
+static int __init stmmac_init(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ /* Create debugfs main directory if it doesn't exist yet */
+ if (!stmmac_fs_dir) {
+ stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+
+ if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
+ pr_err("ERROR %s, debugfs create directory failed\n",
+ STMMAC_RESOURCE_NAME);
+
+ return -ENOMEM;
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ debugfs_remove_recursive(stmmac_fs_dir);
+#endif
+}
+
+module_init(stmmac_init)
+module_exit(stmmac_exit)
+
MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 1664c01..f3918c7 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -28,6 +28,7 @@
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include "stmmac.h"
#include "stmmac_platform.h"
@@ -144,13 +145,24 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
/* Default to phy auto-detection */
plat->phy_addr = -1;
+ /* If we find a phy-handle property, use it as the PHY */
+ plat->phy_node = of_parse_phandle(np, "phy-handle", 0);
+
+ /* If phy-handle is not specified, check if we have a fixed-phy */
+ if (!plat->phy_node && of_phy_is_fixed_link(np)) {
+ if ((of_phy_register_fixed_link(np) < 0))
+ return -ENODEV;
+
+ plat->phy_node = of_node_get(np);
+ }
+
/* "snps,phy-addr" is not a standard property. Mark it as deprecated
* and warn of its use. Remove this when phy node support is added.
*/
if (of_property_read_u32(np, "snps,phy-addr", &plat->phy_addr) == 0)
dev_warn(&pdev->dev, "snps,phy-addr property is deprecated\n");
- if (plat->phy_bus_name)
+ if (plat->phy_node || plat->phy_bus_name)
plat->mdio_bus_data = NULL;
else
plat->mdio_bus_data =
@@ -208,8 +220,10 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
if (of_find_property(np, "snps,pbl", NULL)) {
dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*dma_cfg),
GFP_KERNEL);
- if (!dma_cfg)
+ if (!dma_cfg) {
+ of_node_put(np);
return -ENOMEM;
+ }
plat->dma_cfg = dma_cfg;
of_property_read_u32(np, "snps,pbl", &dma_cfg->pbl);
dma_cfg->fixed_burst =
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b536b4c..4628205 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1361,7 +1361,6 @@ static int cpsw_ndo_stop(struct net_device *ndev)
if (cpsw_common_res_usage_state(priv) <= 1) {
cpts_unregister(priv->cpts);
cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
cpdma_ctlr_stop(priv->dma);
cpsw_ale_stop(priv->ale);
}
@@ -1456,7 +1455,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
if (priv->cpts->rx_enable)
ctrl |= CTRL_V2_RX_TS_BITS;
- break;
+ break;
case CPSW_VERSION_3:
default:
ctrl &= ~CTRL_V3_ALL_TS_MASK;
@@ -1466,7 +1465,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
if (priv->cpts->rx_enable)
ctrl |= CTRL_V3_RX_TS_BITS;
- break;
+ break;
}
mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -1589,10 +1588,8 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
ndev->stats.tx_errors++;
cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
cpdma_chan_stop(priv->txch);
cpdma_chan_start(priv->txch);
- cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
}
@@ -1629,10 +1626,8 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
struct cpsw_priv *priv = netdev_priv(ndev);
cpsw_intr_disable(priv);
- cpdma_ctlr_int_ctrl(priv->dma, false);
cpsw_rx_interrupt(priv->irqs_table[0], priv);
cpsw_tx_interrupt(priv->irqs_table[1], priv);
- cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
}
#endif
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 6e927b4..43b061b 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -268,39 +268,6 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
}
EXPORT_SYMBOL_GPL(cpsw_ale_flush_multicast);
-static void cpsw_ale_flush_ucast(struct cpsw_ale *ale, u32 *ale_entry,
- int port_mask)
-{
- int port;
-
- port = cpsw_ale_get_port_num(ale_entry);
- if ((BIT(port) & port_mask) == 0)
- return; /* ports dont intersect, not interested */
- cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
-}
-
-int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask)
-{
- u32 ale_entry[ALE_ENTRY_WORDS];
- int ret, idx;
-
- for (idx = 0; idx < ale->params.ale_entries; idx++) {
- cpsw_ale_read(ale, idx, ale_entry);
- ret = cpsw_ale_get_entry_type(ale_entry);
- if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
- continue;
-
- if (cpsw_ale_get_mcast(ale_entry))
- cpsw_ale_flush_mcast(ale, ale_entry, port_mask);
- else
- cpsw_ale_flush_ucast(ale, ale_entry, port_mask);
-
- cpsw_ale_write(ale, idx, ale_entry);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_flush);
-
static inline void cpsw_ale_set_vlan_entry_type(u32 *ale_entry,
int flags, u16 vid)
{
@@ -752,18 +719,6 @@ static void cpsw_ale_timer(unsigned long arg)
}
}
-int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout)
-{
- del_timer_sync(&ale->timer);
- ale->ageout = ageout * HZ;
- if (ale->ageout) {
- ale->timer.expires = jiffies + ale->ageout;
- add_timer(&ale->timer);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(cpsw_ale_set_ageout);
-
void cpsw_ale_start(struct cpsw_ale *ale)
{
u32 rev;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index af1e7ec..a700189 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -90,8 +90,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale);
void cpsw_ale_start(struct cpsw_ale *ale);
void cpsw_ale_stop(struct cpsw_ale *ale);
-int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
-int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
int flags, u16 vid);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index b7eafa4..78d49d1 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -44,7 +44,9 @@ struct geneve_dev {
struct net *net; /* netns for packet i/o */
struct net_device *dev; /* netdev for geneve tunnel */
struct geneve_sock *sock; /* socket used for geneve tunnel */
- u8 vni[3]; /* virtual network ID for tunnel */
+ u8 vni[3]; /* virtual network ID for tunnel */
+ u8 ttl; /* TTL override */
+ u8 tos; /* TOS override */
struct sockaddr_in remote; /* IPv4 address for link partner */
struct list_head next; /* geneve's per namespace list */
};
@@ -184,7 +186,7 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
struct flowi4 fl4;
int err;
__be16 sport;
- __u8 tos, ttl = 0;
+ __u8 tos, ttl;
iip = ip_hdr(skb);
@@ -193,7 +195,12 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
/* TODO: port min/max limits should be configurable */
sport = udp_flow_src_port(dev_net(dev), skb, 0, 0, true);
+ tos = geneve->tos;
+ if (tos == 1)
+ tos = ip_tunnel_get_dsfield(iip, skb);
+
memset(&fl4, 0, sizeof(fl4));
+ fl4.flowi4_tos = RT_TOS(tos);
fl4.daddr = geneve->remote.sin_addr.s_addr;
rt = ip_route_output_key(geneve->net, &fl4);
if (IS_ERR(rt)) {
@@ -207,11 +214,10 @@ static netdev_tx_t geneve_xmit(struct sk_buff *skb, struct net_device *dev)
goto rt_tx_error;
}
- /* TODO: tos and ttl should be configurable */
-
- tos = ip_tunnel_ecn_encap(0, iip, skb);
+ tos = ip_tunnel_ecn_encap(tos, iip, skb);
- if (IN_MULTICAST(ntohl(fl4.daddr)))
+ ttl = geneve->ttl;
+ if (!ttl && IN_MULTICAST(ntohl(fl4.daddr)))
ttl = 1;
ttl = ttl ? : ip4_dst_hoplimit(&rt->dst);
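The TTL precedence that results is: explicit per-tunnel override first, then 1 for multicast destinations, then the route's hop limit (the "?:" is the GNU elvis operator). Restated as a self-contained helper for clarity; the name is illustrative, not part of the patch:

/* Illustrative restatement of the TTL selection above. */
static u8 geneve_pick_ttl(u8 cfg_ttl, bool is_multicast, u8 route_hoplimit)
{
        u8 ttl = cfg_ttl;

        if (!ttl && is_multicast)
                ttl = 1;
        return ttl ?: route_hoplimit;   /* fall back to the route */
}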
@@ -297,6 +303,8 @@ static void geneve_setup(struct net_device *dev)
static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = {
[IFLA_GENEVE_ID] = { .type = NLA_U32 },
[IFLA_GENEVE_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
+ [IFLA_GENEVE_TTL] = { .type = NLA_U8 },
+ [IFLA_GENEVE_TOS] = { .type = NLA_U8 },
};
static int geneve_validate(struct nlattr *tb[], struct nlattr *data[])
@@ -364,6 +372,12 @@ static int geneve_newlink(struct net *net, struct net_device *dev,
if (err)
return err;
+ if (data[IFLA_GENEVE_TTL])
+ geneve->ttl = nla_get_u8(data[IFLA_GENEVE_TTL]);
+
+ if (data[IFLA_GENEVE_TOS])
+ geneve->tos = nla_get_u8(data[IFLA_GENEVE_TOS]);
+
list_add(&geneve->next, &gn->geneve_list);
hlist_add_head_rcu(&geneve->hlist, &gn->vni_list[hash]);
@@ -386,6 +400,8 @@ static size_t geneve_get_size(const struct net_device *dev)
{
return nla_total_size(sizeof(__u32)) + /* IFLA_GENEVE_ID */
nla_total_size(sizeof(struct in_addr)) + /* IFLA_GENEVE_REMOTE */
+ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */
+ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */
0;
}
@@ -402,6 +418,10 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev)
geneve->remote.sin_addr.s_addr))
goto nla_put_failure;
+ if (nla_put_u8(skb, IFLA_GENEVE_TTL, geneve->ttl) ||
+ nla_put_u8(skb, IFLA_GENEVE_TOS, geneve->tos))
+ goto nla_put_failure;
+
return 0;
nla_put_failure:
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index ddcc7f8..dd45440 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -161,6 +161,7 @@ struct netvsc_device_info {
unsigned char mac_adr[ETH_ALEN];
bool link_state; /* 0 - link up, 1 - link down */
int ring_size;
+ u32 max_num_vrss_chns;
};
enum rndis_device_state {
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index b024968..06de98a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -227,13 +227,18 @@ static int netvsc_init_buf(struct hv_device *device)
struct netvsc_device *net_device;
struct nvsp_message *init_packet;
struct net_device *ndev;
+ int node;
net_device = get_outbound_net_device(device);
if (!net_device)
return -ENODEV;
ndev = net_device->ndev;
- net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+ node = cpu_to_node(device->channel->target_cpu);
+ net_device->recv_buf = vzalloc_node(net_device->recv_buf_size, node);
+ if (!net_device->recv_buf)
+ net_device->recv_buf = vzalloc(net_device->recv_buf_size);
+
if (!net_device->recv_buf) {
netdev_err(ndev, "unable to allocate receive "
"buffer of size %d\n", net_device->recv_buf_size);
@@ -321,7 +326,9 @@ static int netvsc_init_buf(struct hv_device *device)
/* Now setup the send buffer.
*/
- net_device->send_buf = vzalloc(net_device->send_buf_size);
+ net_device->send_buf = vzalloc_node(net_device->send_buf_size, node);
+ if (!net_device->send_buf)
+ net_device->send_buf = vzalloc(net_device->send_buf_size);
if (!net_device->send_buf) {
netdev_err(ndev, "unable to allocate send "
"buffer of size %d\n", net_device->send_buf_size);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index d9c88bc..358475e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -46,6 +46,8 @@ static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+static int max_num_vrss_chns = 8;
+
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
@@ -755,6 +757,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
ndevctx->device_ctx = hdev;
hv_set_drvdata(hdev, ndev);
device_info.ring_size = ring_size;
+ device_info.max_num_vrss_chns = max_num_vrss_chns;
rndis_filter_device_add(hdev, &device_info);
netif_tx_wake_all_queues(ndev);
@@ -975,6 +978,7 @@ static int netvsc_probe(struct hv_device *dev,
/* Notify the netvsc driver of the new device */
device_info.ring_size = ring_size;
+ device_info.max_num_vrss_chns = max_num_vrss_chns;
ret = rndis_filter_device_add(dev, &device_info);
if (ret != 0) {
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 9118cea..006c1b8 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1013,6 +1013,9 @@ int rndis_filter_device_add(struct hv_device *dev,
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
u32 mtu, size;
+ u32 num_rss_qs;
+ const struct cpumask *node_cpu_mask;
+ u32 num_possible_rss_qs;
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -1100,9 +1103,18 @@ int rndis_filter_device_add(struct hv_device *dev,
if (ret || rsscap.num_recv_que < 2)
goto out;
+ num_rss_qs = min(device_info->max_num_vrss_chns, rsscap.num_recv_que);
+
net_device->max_chn = rsscap.num_recv_que;
- net_device->num_chn = (num_online_cpus() < rsscap.num_recv_que) ?
- num_online_cpus() : rsscap.num_recv_que;
+
+ /*
+ * We will limit the VRSS channels to the number of CPUs in the NUMA node
+ * the primary channel is currently bound to.
+ */
+ node_cpu_mask = cpumask_of_node(cpu_to_node(dev->channel->target_cpu));
+ num_possible_rss_qs = cpumask_weight(node_cpu_mask);
+ net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
+
if (net_device->num_chn == 1)
goto out;
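The resulting channel count is the minimum of three limits: the CPUs in the primary channel's NUMA node, the module's cap, and what the hardware reports. Compactly, as a restatement rather than driver code (assumes <linux/kernel.h> and <linux/topology.h>):

/* num_chn = min(node CPUs, module cap, hw receive queues) */
static u32 pick_num_chn(u32 hw_queues, u32 cap, int node)
{
        u32 node_cpus = cpumask_weight(cpumask_of_node(node));

        return min3(node_cpus, cap, hw_queues);
}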
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 1a3c3e5..1dd5ab8 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -53,3 +53,13 @@ config IEEE802154_CC2520
This driver can also be built as a module. To do so, say M here.
the module will be called 'cc2520'.
+
+config IEEE802154_ATUSB
+ tristate "ATUSB transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154 && USB
+ ---help---
+ Say Y here to enable the ATUSB IEEE 802.15.4 wireless
+ controller.
+
+ This driver can also be built as a module. To do so, say M here.
+ The module will be called 'atusb'.
diff --git a/drivers/net/ieee802154/Makefile b/drivers/net/ieee802154/Makefile
index d77fa4d..cf1d2a6 100644
--- a/drivers/net/ieee802154/Makefile
+++ b/drivers/net/ieee802154/Makefile
@@ -2,3 +2,4 @@ obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o
+obj-$(CONFIG_IEEE802154_ATUSB) += atusb.o
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 67d00fb..2f25a5e 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -35,6 +35,8 @@
#include <net/mac802154.h>
#include <net/cfg802154.h>
+#include "at86rf230.h"
+
struct at86rf230_local;
/* at86rf2xx chip depend data.
* All timings are in us.
@@ -50,7 +52,7 @@ struct at86rf2xx_chip_data {
int rssi_base_val;
int (*set_channel)(struct at86rf230_local *, u8, u8);
- int (*get_desense_steps)(struct at86rf230_local *, s32);
+ int (*set_txpower)(struct at86rf230_local *, s32);
};
#define AT86RF2XX_MAX_BUF (127 + 3)
@@ -102,200 +104,6 @@ struct at86rf230_local {
struct at86rf230_state_change tx;
};
-#define RG_TRX_STATUS (0x01)
-#define SR_TRX_STATUS 0x01, 0x1f, 0
-#define SR_RESERVED_01_3 0x01, 0x20, 5
-#define SR_CCA_STATUS 0x01, 0x40, 6
-#define SR_CCA_DONE 0x01, 0x80, 7
-#define RG_TRX_STATE (0x02)
-#define SR_TRX_CMD 0x02, 0x1f, 0
-#define SR_TRAC_STATUS 0x02, 0xe0, 5
-#define RG_TRX_CTRL_0 (0x03)
-#define SR_CLKM_CTRL 0x03, 0x07, 0
-#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
-#define SR_PAD_IO_CLKM 0x03, 0x30, 4
-#define SR_PAD_IO 0x03, 0xc0, 6
-#define RG_TRX_CTRL_1 (0x04)
-#define SR_IRQ_POLARITY 0x04, 0x01, 0
-#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
-#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
-#define SR_RX_BL_CTRL 0x04, 0x10, 4
-#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
-#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
-#define SR_PA_EXT_EN 0x04, 0x80, 7
-#define RG_PHY_TX_PWR (0x05)
-#define SR_TX_PWR 0x05, 0x0f, 0
-#define SR_PA_LT 0x05, 0x30, 4
-#define SR_PA_BUF_LT 0x05, 0xc0, 6
-#define RG_PHY_RSSI (0x06)
-#define SR_RSSI 0x06, 0x1f, 0
-#define SR_RND_VALUE 0x06, 0x60, 5
-#define SR_RX_CRC_VALID 0x06, 0x80, 7
-#define RG_PHY_ED_LEVEL (0x07)
-#define SR_ED_LEVEL 0x07, 0xff, 0
-#define RG_PHY_CC_CCA (0x08)
-#define SR_CHANNEL 0x08, 0x1f, 0
-#define SR_CCA_MODE 0x08, 0x60, 5
-#define SR_CCA_REQUEST 0x08, 0x80, 7
-#define RG_CCA_THRES (0x09)
-#define SR_CCA_ED_THRES 0x09, 0x0f, 0
-#define SR_RESERVED_09_1 0x09, 0xf0, 4
-#define RG_RX_CTRL (0x0a)
-#define SR_PDT_THRES 0x0a, 0x0f, 0
-#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
-#define RG_SFD_VALUE (0x0b)
-#define SR_SFD_VALUE 0x0b, 0xff, 0
-#define RG_TRX_CTRL_2 (0x0c)
-#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
-#define SR_SUB_MODE 0x0c, 0x04, 2
-#define SR_BPSK_QPSK 0x0c, 0x08, 3
-#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
-#define SR_RESERVED_0c_5 0x0c, 0x60, 5
-#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
-#define RG_ANT_DIV (0x0d)
-#define SR_ANT_CTRL 0x0d, 0x03, 0
-#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
-#define SR_ANT_DIV_EN 0x0d, 0x08, 3
-#define SR_RESERVED_0d_2 0x0d, 0x70, 4
-#define SR_ANT_SEL 0x0d, 0x80, 7
-#define RG_IRQ_MASK (0x0e)
-#define SR_IRQ_MASK 0x0e, 0xff, 0
-#define RG_IRQ_STATUS (0x0f)
-#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
-#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
-#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
-#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
-#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
-#define SR_IRQ_5_AMI 0x0f, 0x20, 5
-#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
-#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
-#define RG_VREG_CTRL (0x10)
-#define SR_RESERVED_10_6 0x10, 0x03, 0
-#define SR_DVDD_OK 0x10, 0x04, 2
-#define SR_DVREG_EXT 0x10, 0x08, 3
-#define SR_RESERVED_10_3 0x10, 0x30, 4
-#define SR_AVDD_OK 0x10, 0x40, 6
-#define SR_AVREG_EXT 0x10, 0x80, 7
-#define RG_BATMON (0x11)
-#define SR_BATMON_VTH 0x11, 0x0f, 0
-#define SR_BATMON_HR 0x11, 0x10, 4
-#define SR_BATMON_OK 0x11, 0x20, 5
-#define SR_RESERVED_11_1 0x11, 0xc0, 6
-#define RG_XOSC_CTRL (0x12)
-#define SR_XTAL_TRIM 0x12, 0x0f, 0
-#define SR_XTAL_MODE 0x12, 0xf0, 4
-#define RG_RX_SYN (0x15)
-#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
-#define SR_RESERVED_15_2 0x15, 0x70, 4
-#define SR_RX_PDT_DIS 0x15, 0x80, 7
-#define RG_XAH_CTRL_1 (0x17)
-#define SR_RESERVED_17_8 0x17, 0x01, 0
-#define SR_AACK_PROM_MODE 0x17, 0x02, 1
-#define SR_AACK_ACK_TIME 0x17, 0x04, 2
-#define SR_RESERVED_17_5 0x17, 0x08, 3
-#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
-#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
-#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
-#define SR_RESERVED_17_1 0x17, 0x80, 7
-#define RG_FTN_CTRL (0x18)
-#define SR_RESERVED_18_2 0x18, 0x7f, 0
-#define SR_FTN_START 0x18, 0x80, 7
-#define RG_PLL_CF (0x1a)
-#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
-#define SR_PLL_CF_START 0x1a, 0x80, 7
-#define RG_PLL_DCU (0x1b)
-#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
-#define SR_RESERVED_1b_2 0x1b, 0x40, 6
-#define SR_PLL_DCU_START 0x1b, 0x80, 7
-#define RG_PART_NUM (0x1c)
-#define SR_PART_NUM 0x1c, 0xff, 0
-#define RG_VERSION_NUM (0x1d)
-#define SR_VERSION_NUM 0x1d, 0xff, 0
-#define RG_MAN_ID_0 (0x1e)
-#define SR_MAN_ID_0 0x1e, 0xff, 0
-#define RG_MAN_ID_1 (0x1f)
-#define SR_MAN_ID_1 0x1f, 0xff, 0
-#define RG_SHORT_ADDR_0 (0x20)
-#define SR_SHORT_ADDR_0 0x20, 0xff, 0
-#define RG_SHORT_ADDR_1 (0x21)
-#define SR_SHORT_ADDR_1 0x21, 0xff, 0
-#define RG_PAN_ID_0 (0x22)
-#define SR_PAN_ID_0 0x22, 0xff, 0
-#define RG_PAN_ID_1 (0x23)
-#define SR_PAN_ID_1 0x23, 0xff, 0
-#define RG_IEEE_ADDR_0 (0x24)
-#define SR_IEEE_ADDR_0 0x24, 0xff, 0
-#define RG_IEEE_ADDR_1 (0x25)
-#define SR_IEEE_ADDR_1 0x25, 0xff, 0
-#define RG_IEEE_ADDR_2 (0x26)
-#define SR_IEEE_ADDR_2 0x26, 0xff, 0
-#define RG_IEEE_ADDR_3 (0x27)
-#define SR_IEEE_ADDR_3 0x27, 0xff, 0
-#define RG_IEEE_ADDR_4 (0x28)
-#define SR_IEEE_ADDR_4 0x28, 0xff, 0
-#define RG_IEEE_ADDR_5 (0x29)
-#define SR_IEEE_ADDR_5 0x29, 0xff, 0
-#define RG_IEEE_ADDR_6 (0x2a)
-#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
-#define RG_IEEE_ADDR_7 (0x2b)
-#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
-#define RG_XAH_CTRL_0 (0x2c)
-#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
-#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
-#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
-#define RG_CSMA_SEED_0 (0x2d)
-#define SR_CSMA_SEED_0 0x2d, 0xff, 0
-#define RG_CSMA_SEED_1 (0x2e)
-#define SR_CSMA_SEED_1 0x2e, 0x07, 0
-#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
-#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
-#define SR_AACK_SET_PD 0x2e, 0x20, 5
-#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
-#define RG_CSMA_BE (0x2f)
-#define SR_MIN_BE 0x2f, 0x0f, 0
-#define SR_MAX_BE 0x2f, 0xf0, 4
-
-#define CMD_REG 0x80
-#define CMD_REG_MASK 0x3f
-#define CMD_WRITE 0x40
-#define CMD_FB 0x20
-
-#define IRQ_BAT_LOW (1 << 7)
-#define IRQ_TRX_UR (1 << 6)
-#define IRQ_AMI (1 << 5)
-#define IRQ_CCA_ED (1 << 4)
-#define IRQ_TRX_END (1 << 3)
-#define IRQ_RX_START (1 << 2)
-#define IRQ_PLL_UNL (1 << 1)
-#define IRQ_PLL_LOCK (1 << 0)
-
-#define IRQ_ACTIVE_HIGH 0
-#define IRQ_ACTIVE_LOW 1
-
-#define STATE_P_ON 0x00 /* BUSY */
-#define STATE_BUSY_RX 0x01
-#define STATE_BUSY_TX 0x02
-#define STATE_FORCE_TRX_OFF 0x03
-#define STATE_FORCE_TX_ON 0x04 /* IDLE */
-/* 0x05 */ /* INVALID_PARAMETER */
-#define STATE_RX_ON 0x06
-/* 0x07 */ /* SUCCESS */
-#define STATE_TRX_OFF 0x08
-#define STATE_TX_ON 0x09
-/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
-#define STATE_SLEEP 0x0F
-#define STATE_PREP_DEEP_SLEEP 0x10
-#define STATE_BUSY_RX_AACK 0x11
-#define STATE_BUSY_TX_ARET 0x12
-#define STATE_RX_AACK_ON 0x16
-#define STATE_TX_ARET_ON 0x19
-#define STATE_RX_ON_NOCLK 0x1C
-#define STATE_RX_AACK_ON_NOCLK 0x1D
-#define STATE_BUSY_RX_AACK_NOCLK 0x1E
-#define STATE_TRANSITION_IN_PROGRESS 0x1F
-
-#define TRX_STATE_MASK (0x1F)
-
#define AT86RF2XX_NUMREGS 0x3F
static void
@@ -1010,7 +818,7 @@ at86rf230_xmit_start(void *context)
if (lp->is_tx_from_off) {
lp->is_tx_from_off = false;
at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
- at86rf230_xmit_tx_on,
+ at86rf230_write_frame,
false);
} else {
at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
@@ -1076,6 +884,50 @@ at86rf23x_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
}
+#define AT86RF2XX_MAX_ED_LEVELS 0xF
+static const s32 at86rf23x_ed_levels[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+ -9100, -8900, -8700, -8500, -8300, -8100, -7900, -7700, -7500, -7300,
+ -7100, -6900, -6700, -6500, -6300, -6100,
+};
+
+static const s32 at86rf212_ed_levels_100[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+ -10000, -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200,
+ -8000, -7800, -7600, -7400, -7200, -7000,
+};
+
+static const s32 at86rf212_ed_levels_98[AT86RF2XX_MAX_ED_LEVELS + 1] = {
+ -9800, -9600, -9400, -9200, -9000, -8800, -8600, -8400, -8200, -8000,
+ -7800, -7600, -7400, -7200, -7000, -6800,
+};
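+/* The CCA ED threshold tables above are in mBm (100 * dBm) and are indexed
+ * by the 4-bit SR_CCA_ED_THRES register field; e.g. at86rf23x index 0 is
+ * -9100 mBm (-91 dBm), the chips' RSSI base value.
+ */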
+
+static inline int
+at86rf212_update_cca_ed_level(struct at86rf230_local *lp, int rssi_base_val)
+{
+ unsigned int cca_ed_thres;
+ int rc;
+
+ rc = at86rf230_read_subreg(lp, SR_CCA_ED_THRES, &cca_ed_thres);
+ if (rc < 0)
+ return rc;
+
+ switch (rssi_base_val) {
+ case -98:
+ lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_98;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_98);
+ lp->hw->phy->cca_ed_level = at86rf212_ed_levels_98[cca_ed_thres];
+ break;
+ case -100:
+ lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
+ lp->hw->phy->cca_ed_level = at86rf212_ed_levels_100[cca_ed_thres];
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ return 0;
+}
+
static int
at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
{
@@ -1098,6 +950,10 @@ at86rf212_set_channel(struct at86rf230_local *lp, u8 page, u8 channel)
if (rc < 0)
return rc;
+ rc = at86rf212_update_cca_ed_level(lp, lp->data->rssi_base_val);
+ if (rc < 0)
+ return rc;
+
/* This sets the symbol_duration according to the frequency on the 212.
* TODO move this handling into the channel and page setting in cfg802154.
* We can do that, since these timings are defined by the 802.15.4 standard.
@@ -1193,23 +1049,56 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
return 0;
}
+#define AT86RF23X_MAX_TX_POWERS 0xF
+static const s32 at86rf233_powers[AT86RF23X_MAX_TX_POWERS + 1] = {
+ 400, 370, 340, 300, 250, 200, 100, 0, -100, -200, -300, -400, -600,
+ -800, -1200, -1700,
+};
+
+static const s32 at86rf231_powers[AT86RF23X_MAX_TX_POWERS + 1] = {
+ 300, 280, 230, 180, 130, 70, 0, -100, -200, -300, -400, -500, -700,
+ -900, -1200, -1700,
+};
+
+#define AT86RF212_MAX_TX_POWERS 0x1F
+static const s32 at86rf212_powers[AT86RF212_MAX_TX_POWERS + 1] = {
+ 500, 400, 300, 200, 100, 0, -100, -200, -300, -400, -500, -600, -700,
+ -800, -900, -1000, -1100, -1200, -1300, -1400, -1500, -1600, -1700,
+ -1800, -1900, -2000, -2100, -2200, -2300, -2400, -2500, -2600,
+};
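+/* The TX power tables are in mBm (100 * dBm) and ordered to match the
+ * hardware register encoding: set_txpower() below writes the matching index
+ * straight into SR_TX_PWR_23X or SR_TX_PWR_212, e.g. at86rf233 index 0 is
+ * 400 mBm (+4 dBm).
+ */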
+
+static int
+at86rf23x_set_txpower(struct at86rf230_local *lp, s32 mbm)
+{
+ u32 i;
+
+ for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
+ if (lp->hw->phy->supported.tx_powers[i] == mbm)
+ return at86rf230_write_subreg(lp, SR_TX_PWR_23X, i);
+ }
+
+ return -EINVAL;
+}
+
static int
-at86rf230_set_txpower(struct ieee802154_hw *hw, s8 db)
+at86rf212_set_txpower(struct at86rf230_local *lp, s32 mbm)
{
- struct at86rf230_local *lp = hw->priv;
+ u32 i;
- /* typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60, lower five
- * bits decrease power in 1dB steps. 0x60 represents extra PA gain of
- * 0dB.
- * thus, supported values for db range from -26 to 5, for 31dB of
- * reduction to 0dB of reduction.
- */
- if (db > 5 || db < -26)
- return -EINVAL;
+ for (i = 0; i < lp->hw->phy->supported.tx_powers_size; i++) {
+ if (lp->hw->phy->supported.tx_powers[i] == mbm)
+ return at86rf230_write_subreg(lp, SR_TX_PWR_212, i);
+ }
- db = -(db - 5);
+ return -EINVAL;
+}
+
+static int
+at86rf230_set_txpower(struct ieee802154_hw *hw, s32 mbm)
+{
+ struct at86rf230_local *lp = hw->priv;
- return __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db);
+ return lp->data->set_txpower(lp, mbm);
}
static int
@@ -1254,28 +1143,19 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
}
-static int
-at86rf212_get_desens_steps(struct at86rf230_local *lp, s32 level)
-{
- return (level - lp->data->rssi_base_val) * 100 / 207;
-}
-
-static int
-at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level)
-{
- return (level - lp->data->rssi_base_val) / 2;
-}
static int
-at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 level)
+at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
{
struct at86rf230_local *lp = hw->priv;
+ u32 i;
- if (level < lp->data->rssi_base_val || level > 30)
- return -EINVAL;
+ for (i = 0; i < hw->phy->supported.cca_ed_levels_size; i++) {
+ if (hw->phy->supported.cca_ed_levels[i] == mbm)
+ return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, i);
+ }
- return at86rf230_write_subreg(lp, SR_CCA_ED_THRES,
- lp->data->get_desense_steps(lp, level));
+ return -EINVAL;
}
static int
@@ -1365,7 +1245,7 @@ static struct at86rf2xx_chip_data at86rf233_data = {
.t_p_ack = 545,
.rssi_base_val = -91,
.set_channel = at86rf23x_set_channel,
- .get_desense_steps = at86rf23x_get_desens_steps
+ .set_txpower = at86rf23x_set_txpower,
};
static struct at86rf2xx_chip_data at86rf231_data = {
@@ -1378,7 +1258,7 @@ static struct at86rf2xx_chip_data at86rf231_data = {
.t_p_ack = 545,
.rssi_base_val = -91,
.set_channel = at86rf23x_set_channel,
- .get_desense_steps = at86rf23x_get_desens_steps
+ .set_txpower = at86rf23x_set_txpower,
};
static struct at86rf2xx_chip_data at86rf212_data = {
@@ -1391,7 +1271,7 @@ static struct at86rf2xx_chip_data at86rf212_data = {
.t_p_ack = 545,
.rssi_base_val = -100,
.set_channel = at86rf212_set_channel,
- .get_desense_steps = at86rf212_get_desens_steps
+ .set_txpower = at86rf212_set_txpower,
};
static int at86rf230_hw_init(struct at86rf230_local *lp, u8 xtal_trim)
@@ -1564,8 +1444,21 @@ at86rf230_detect_device(struct at86rf230_local *lp)
}
lp->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AACK |
- IEEE802154_HW_TXPOWER | IEEE802154_HW_ARET |
- IEEE802154_HW_AFILT | IEEE802154_HW_PROMISCUOUS;
+ IEEE802154_HW_CSMA_PARAMS |
+ IEEE802154_HW_FRAME_RETRIES | IEEE802154_HW_AFILT |
+ IEEE802154_HW_PROMISCUOUS;
+
+ lp->hw->phy->flags = WPAN_PHY_FLAG_TXPOWER |
+ WPAN_PHY_FLAG_CCA_ED_LEVEL |
+ WPAN_PHY_FLAG_CCA_MODE;
+
+ lp->hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) |
+ BIT(NL802154_CCA_CARRIER) | BIT(NL802154_CCA_ENERGY_CARRIER);
+ lp->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND) |
+ BIT(NL802154_CCA_OPT_ENERGY_CARRIER_OR);
+
+ lp->hw->phy->supported.cca_ed_levels = at86rf23x_ed_levels;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf23x_ed_levels);
lp->hw->phy->cca.mode = NL802154_CCA_ENERGY;
@@ -1573,36 +1466,49 @@ at86rf230_detect_device(struct at86rf230_local *lp)
case 2:
chip = "at86rf230";
rc = -ENOTSUPP;
- break;
+ goto not_supp;
case 3:
chip = "at86rf231";
lp->data = &at86rf231_data;
- lp->hw->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->supported.channels[0] = 0x7FFF800;
lp->hw->phy->current_channel = 11;
lp->hw->phy->symbol_duration = 16;
+ lp->hw->phy->supported.tx_powers = at86rf231_powers;
+ lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf231_powers);
break;
case 7:
chip = "at86rf212";
lp->data = &at86rf212_data;
lp->hw->flags |= IEEE802154_HW_LBT;
- lp->hw->phy->channels_supported[0] = 0x00007FF;
- lp->hw->phy->channels_supported[2] = 0x00007FF;
+ lp->hw->phy->supported.channels[0] = 0x00007FF;
+ lp->hw->phy->supported.channels[2] = 0x00007FF;
lp->hw->phy->current_channel = 5;
lp->hw->phy->symbol_duration = 25;
+ lp->hw->phy->supported.lbt = NL802154_SUPPORTED_BOOL_BOTH;
+ lp->hw->phy->supported.tx_powers = at86rf212_powers;
+ lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf212_powers);
+ lp->hw->phy->supported.cca_ed_levels = at86rf212_ed_levels_100;
+ lp->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(at86rf212_ed_levels_100);
break;
case 11:
chip = "at86rf233";
lp->data = &at86rf233_data;
- lp->hw->phy->channels_supported[0] = 0x7FFF800;
+ lp->hw->phy->supported.channels[0] = 0x7FFF800;
lp->hw->phy->current_channel = 13;
lp->hw->phy->symbol_duration = 16;
+ lp->hw->phy->supported.tx_powers = at86rf233_powers;
+ lp->hw->phy->supported.tx_powers_size = ARRAY_SIZE(at86rf233_powers);
break;
default:
chip = "unknown";
rc = -ENOTSUPP;
- break;
+ goto not_supp;
}
+ lp->hw->phy->cca_ed_level = lp->hw->phy->supported.cca_ed_levels[7];
+ lp->hw->phy->transmit_power = lp->hw->phy->supported.tx_powers[0];
+
+not_supp:
dev_info(&lp->spi->dev, "Detected %s chip version %d\n", chip, version);
return rc;
diff --git a/drivers/net/ieee802154/at86rf230.h b/drivers/net/ieee802154/at86rf230.h
new file mode 100644
index 0000000..1e6d1cc
--- /dev/null
+++ b/drivers/net/ieee802154/at86rf230.h
@@ -0,0 +1,220 @@
+/*
+ * AT86RF230/RF231 driver
+ *
+ * Copyright (C) 2009-2012 Siemens AG
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Written by:
+ * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+ * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ */
+
+#ifndef _AT86RF230_H
+#define _AT86RF230_H
+
+#define RG_TRX_STATUS (0x01)
+#define SR_TRX_STATUS 0x01, 0x1f, 0
+#define SR_RESERVED_01_3 0x01, 0x20, 5
+#define SR_CCA_STATUS 0x01, 0x40, 6
+#define SR_CCA_DONE 0x01, 0x80, 7
+#define RG_TRX_STATE (0x02)
+#define SR_TRX_CMD 0x02, 0x1f, 0
+#define SR_TRAC_STATUS 0x02, 0xe0, 5
+#define RG_TRX_CTRL_0 (0x03)
+#define SR_CLKM_CTRL 0x03, 0x07, 0
+#define SR_CLKM_SHA_SEL 0x03, 0x08, 3
+#define SR_PAD_IO_CLKM 0x03, 0x30, 4
+#define SR_PAD_IO 0x03, 0xc0, 6
+#define RG_TRX_CTRL_1 (0x04)
+#define SR_IRQ_POLARITY 0x04, 0x01, 0
+#define SR_IRQ_MASK_MODE 0x04, 0x02, 1
+#define SR_SPI_CMD_MODE 0x04, 0x0c, 2
+#define SR_RX_BL_CTRL 0x04, 0x10, 4
+#define SR_TX_AUTO_CRC_ON 0x04, 0x20, 5
+#define SR_IRQ_2_EXT_EN 0x04, 0x40, 6
+#define SR_PA_EXT_EN 0x04, 0x80, 7
+#define RG_PHY_TX_PWR (0x05)
+#define SR_TX_PWR_23X 0x05, 0x0f, 0
+#define SR_PA_LT_230 0x05, 0x30, 4
+#define SR_PA_BUF_LT_230 0x05, 0xc0, 6
+#define SR_TX_PWR_212 0x05, 0x1f, 0
+#define SR_GC_PA_212 0x05, 0x60, 5
+#define SR_PA_BOOST_LT_212 0x05, 0x80, 7
+#define RG_PHY_RSSI (0x06)
+#define SR_RSSI 0x06, 0x1f, 0
+#define SR_RND_VALUE 0x06, 0x60, 5
+#define SR_RX_CRC_VALID 0x06, 0x80, 7
+#define RG_PHY_ED_LEVEL (0x07)
+#define SR_ED_LEVEL 0x07, 0xff, 0
+#define RG_PHY_CC_CCA (0x08)
+#define SR_CHANNEL 0x08, 0x1f, 0
+#define SR_CCA_MODE 0x08, 0x60, 5
+#define SR_CCA_REQUEST 0x08, 0x80, 7
+#define RG_CCA_THRES (0x09)
+#define SR_CCA_ED_THRES 0x09, 0x0f, 0
+#define SR_RESERVED_09_1 0x09, 0xf0, 4
+#define RG_RX_CTRL (0x0a)
+#define SR_PDT_THRES 0x0a, 0x0f, 0
+#define SR_RESERVED_0a_1 0x0a, 0xf0, 4
+#define RG_SFD_VALUE (0x0b)
+#define SR_SFD_VALUE 0x0b, 0xff, 0
+#define RG_TRX_CTRL_2 (0x0c)
+#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
+#define SR_SUB_MODE 0x0c, 0x04, 2
+#define SR_BPSK_QPSK 0x0c, 0x08, 3
+#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
+#define SR_RESERVED_0c_5 0x0c, 0x60, 5
+#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
+#define RG_ANT_DIV (0x0d)
+#define SR_ANT_CTRL 0x0d, 0x03, 0
+#define SR_ANT_EXT_SW_EN 0x0d, 0x04, 2
+#define SR_ANT_DIV_EN 0x0d, 0x08, 3
+#define SR_RESERVED_0d_2 0x0d, 0x70, 4
+#define SR_ANT_SEL 0x0d, 0x80, 7
+#define RG_IRQ_MASK (0x0e)
+#define SR_IRQ_MASK 0x0e, 0xff, 0
+#define RG_IRQ_STATUS (0x0f)
+#define SR_IRQ_0_PLL_LOCK 0x0f, 0x01, 0
+#define SR_IRQ_1_PLL_UNLOCK 0x0f, 0x02, 1
+#define SR_IRQ_2_RX_START 0x0f, 0x04, 2
+#define SR_IRQ_3_TRX_END 0x0f, 0x08, 3
+#define SR_IRQ_4_CCA_ED_DONE 0x0f, 0x10, 4
+#define SR_IRQ_5_AMI 0x0f, 0x20, 5
+#define SR_IRQ_6_TRX_UR 0x0f, 0x40, 6
+#define SR_IRQ_7_BAT_LOW 0x0f, 0x80, 7
+#define RG_VREG_CTRL (0x10)
+#define SR_RESERVED_10_6 0x10, 0x03, 0
+#define SR_DVDD_OK 0x10, 0x04, 2
+#define SR_DVREG_EXT 0x10, 0x08, 3
+#define SR_RESERVED_10_3 0x10, 0x30, 4
+#define SR_AVDD_OK 0x10, 0x40, 6
+#define SR_AVREG_EXT 0x10, 0x80, 7
+#define RG_BATMON (0x11)
+#define SR_BATMON_VTH 0x11, 0x0f, 0
+#define SR_BATMON_HR 0x11, 0x10, 4
+#define SR_BATMON_OK 0x11, 0x20, 5
+#define SR_RESERVED_11_1 0x11, 0xc0, 6
+#define RG_XOSC_CTRL (0x12)
+#define SR_XTAL_TRIM 0x12, 0x0f, 0
+#define SR_XTAL_MODE 0x12, 0xf0, 4
+#define RG_RX_SYN (0x15)
+#define SR_RX_PDT_LEVEL 0x15, 0x0f, 0
+#define SR_RESERVED_15_2 0x15, 0x70, 4
+#define SR_RX_PDT_DIS 0x15, 0x80, 7
+#define RG_XAH_CTRL_1 (0x17)
+#define SR_RESERVED_17_8 0x17, 0x01, 0
+#define SR_AACK_PROM_MODE 0x17, 0x02, 1
+#define SR_AACK_ACK_TIME 0x17, 0x04, 2
+#define SR_RESERVED_17_5 0x17, 0x08, 3
+#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
+#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
+#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
+#define SR_RESERVED_17_1 0x17, 0x80, 7
+#define RG_FTN_CTRL (0x18)
+#define SR_RESERVED_18_2 0x18, 0x7f, 0
+#define SR_FTN_START 0x18, 0x80, 7
+#define RG_PLL_CF (0x1a)
+#define SR_RESERVED_1a_2 0x1a, 0x7f, 0
+#define SR_PLL_CF_START 0x1a, 0x80, 7
+#define RG_PLL_DCU (0x1b)
+#define SR_RESERVED_1b_3 0x1b, 0x3f, 0
+#define SR_RESERVED_1b_2 0x1b, 0x40, 6
+#define SR_PLL_DCU_START 0x1b, 0x80, 7
+#define RG_PART_NUM (0x1c)
+#define SR_PART_NUM 0x1c, 0xff, 0
+#define RG_VERSION_NUM (0x1d)
+#define SR_VERSION_NUM 0x1d, 0xff, 0
+#define RG_MAN_ID_0 (0x1e)
+#define SR_MAN_ID_0 0x1e, 0xff, 0
+#define RG_MAN_ID_1 (0x1f)
+#define SR_MAN_ID_1 0x1f, 0xff, 0
+#define RG_SHORT_ADDR_0 (0x20)
+#define SR_SHORT_ADDR_0 0x20, 0xff, 0
+#define RG_SHORT_ADDR_1 (0x21)
+#define SR_SHORT_ADDR_1 0x21, 0xff, 0
+#define RG_PAN_ID_0 (0x22)
+#define SR_PAN_ID_0 0x22, 0xff, 0
+#define RG_PAN_ID_1 (0x23)
+#define SR_PAN_ID_1 0x23, 0xff, 0
+#define RG_IEEE_ADDR_0 (0x24)
+#define SR_IEEE_ADDR_0 0x24, 0xff, 0
+#define RG_IEEE_ADDR_1 (0x25)
+#define SR_IEEE_ADDR_1 0x25, 0xff, 0
+#define RG_IEEE_ADDR_2 (0x26)
+#define SR_IEEE_ADDR_2 0x26, 0xff, 0
+#define RG_IEEE_ADDR_3 (0x27)
+#define SR_IEEE_ADDR_3 0x27, 0xff, 0
+#define RG_IEEE_ADDR_4 (0x28)
+#define SR_IEEE_ADDR_4 0x28, 0xff, 0
+#define RG_IEEE_ADDR_5 (0x29)
+#define SR_IEEE_ADDR_5 0x29, 0xff, 0
+#define RG_IEEE_ADDR_6 (0x2a)
+#define SR_IEEE_ADDR_6 0x2a, 0xff, 0
+#define RG_IEEE_ADDR_7 (0x2b)
+#define SR_IEEE_ADDR_7 0x2b, 0xff, 0
+#define RG_XAH_CTRL_0 (0x2c)
+#define SR_SLOTTED_OPERATION 0x2c, 0x01, 0
+#define SR_MAX_CSMA_RETRIES 0x2c, 0x0e, 1
+#define SR_MAX_FRAME_RETRIES 0x2c, 0xf0, 4
+#define RG_CSMA_SEED_0 (0x2d)
+#define SR_CSMA_SEED_0 0x2d, 0xff, 0
+#define RG_CSMA_SEED_1 (0x2e)
+#define SR_CSMA_SEED_1 0x2e, 0x07, 0
+#define SR_AACK_I_AM_COORD 0x2e, 0x08, 3
+#define SR_AACK_DIS_ACK 0x2e, 0x10, 4
+#define SR_AACK_SET_PD 0x2e, 0x20, 5
+#define SR_AACK_FVN_MODE 0x2e, 0xc0, 6
+#define RG_CSMA_BE (0x2f)
+#define SR_MIN_BE 0x2f, 0x0f, 0
+#define SR_MAX_BE 0x2f, 0xf0, 4
+
+#define CMD_REG 0x80
+#define CMD_REG_MASK 0x3f
+#define CMD_WRITE 0x40
+#define CMD_FB 0x20
+
+#define IRQ_BAT_LOW BIT(7)
+#define IRQ_TRX_UR BIT(6)
+#define IRQ_AMI BIT(5)
+#define IRQ_CCA_ED BIT(4)
+#define IRQ_TRX_END BIT(3)
+#define IRQ_RX_START BIT(2)
+#define IRQ_PLL_UNL BIT(1)
+#define IRQ_PLL_LOCK BIT(0)
+
+#define IRQ_ACTIVE_HIGH 0
+#define IRQ_ACTIVE_LOW 1
+
+#define STATE_P_ON 0x00 /* BUSY */
+#define STATE_BUSY_RX 0x01
+#define STATE_BUSY_TX 0x02
+#define STATE_FORCE_TRX_OFF 0x03
+#define STATE_FORCE_TX_ON 0x04 /* IDLE */
+/* 0x05 */ /* INVALID_PARAMETER */
+#define STATE_RX_ON 0x06
+/* 0x07 */ /* SUCCESS */
+#define STATE_TRX_OFF 0x08
+#define STATE_TX_ON 0x09
+/* 0x0a - 0x0e */ /* 0x0a - UNSUPPORTED_ATTRIBUTE */
+#define STATE_SLEEP 0x0F
+#define STATE_PREP_DEEP_SLEEP 0x10
+#define STATE_BUSY_RX_AACK 0x11
+#define STATE_BUSY_TX_ARET 0x12
+#define STATE_RX_AACK_ON 0x16
+#define STATE_TX_ARET_ON 0x19
+#define STATE_RX_ON_NOCLK 0x1C
+#define STATE_RX_AACK_ON_NOCLK 0x1D
+#define STATE_BUSY_RX_AACK_NOCLK 0x1E
+#define STATE_TRANSITION_IN_PROGRESS 0x1F
+
+#define TRX_STATE_MASK (0x1F)
+
+#endif /* !_AT86RF230_H */
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
new file mode 100644
index 0000000..5b6bb9a
--- /dev/null
+++ b/drivers/net/ieee802154/atusb.c
@@ -0,0 +1,699 @@
+/*
+ * atusb.c - Driver for the ATUSB IEEE 802.15.4 dongle
+ *
+ * Written 2013 by Werner Almesberger <werner@almesberger.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2
+ *
+ * Based on at86rf230.c and spi_atusb.c.
+ * at86rf230.c is
+ * Copyright (C) 2009 Siemens AG
+ * Written by: Dmitry Eremin-Solenikov <dmitry.baryshkov@siemens.com>
+ *
+ * spi_atusb.c is
+ * Copyright (c) 2011 Richard Sharpe <realrichardsharpe@gmail.com>
+ * Copyright (c) 2011 Stefan Schmidt <stefan@datenfreihafen.org>
+ * Copyright (c) 2011 Werner Almesberger <werner@almesberger.net>
+ *
+ * USB initialization is
+ * Copyright (c) 2013 Alexander Aring <alex.aring@gmail.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/usb.h>
+#include <linux/skbuff.h>
+
+#include <net/cfg802154.h>
+#include <net/mac802154.h>
+
+#include "at86rf230.h"
+#include "atusb.h"
+
+#define ATUSB_JEDEC_ATMEL 0x1f /* JEDEC manufacturer ID */
+
+#define ATUSB_NUM_RX_URBS 4 /* allow for a bit of local latency */
+#define ATUSB_ALLOC_DELAY_MS 100 /* delay after failed allocation */
+#define ATUSB_TX_TIMEOUT_MS 200 /* on the air timeout */
+
+struct atusb {
+ struct ieee802154_hw *hw;
+ struct usb_device *usb_dev;
+ int shutdown; /* non-zero if shutting down */
+ int err; /* set by first error */
+
+ /* RX variables */
+ struct delayed_work work; /* memory allocations */
+ struct usb_anchor idle_urbs; /* URBs waiting to be submitted */
+ struct usb_anchor rx_urbs; /* URBs waiting for reception */
+
+ /* TX variables */
+ struct usb_ctrlrequest tx_dr;
+ struct urb *tx_urb;
+ struct sk_buff *tx_skb;
+ uint8_t tx_ack_seq; /* current TX ACK sequence number */
+};
+
+/* at86rf230.h defines values as <reg, mask, shift> tuples. We use the more
+ * traditional style of having registers and or-able values. SR_REG extracts
+ * the register number. SR_VALUE uses the shift to prepare a value accordingly.
+ */
+
+#define __SR_REG(reg, mask, shift) (reg)
+#define SR_REG(sr) __SR_REG(sr)
+
+#define __SR_VALUE(reg, mask, shift, val) ((val) << (shift))
+#define SR_VALUE(sr, val) __SR_VALUE(sr, (val))
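+/* Example expansion, using the SR_AACK_I_AM_COORD tuple (0x2e, 0x08, 3)
+ * from at86rf230.h:
+ * SR_REG(SR_AACK_I_AM_COORD) -> (0x2e)
+ * SR_VALUE(SR_AACK_I_AM_COORD, 1) -> ((1) << (3)), i.e. mask bit 0x08
+ */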
+
+/* ----- USB commands without data ----------------------------------------- */
+
+/* To reduce the number of error checks in the code, we record the first error
+ * in atusb->err and reject all subsequent requests until the error is cleared.
+ */
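+/* Callers can therefore batch several requests (as atusb_set_hw_addr_filt()
+ * does with its register writes) and check atusb_get_and_clear_error() once
+ * at the end.
+ */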
+
+static int atusb_control_msg(struct atusb *atusb, unsigned int pipe,
+ __u8 request, __u8 requesttype,
+ __u16 value, __u16 index,
+ void *data, __u16 size, int timeout)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ int ret;
+
+ if (atusb->err)
+ return atusb->err;
+
+ ret = usb_control_msg(usb_dev, pipe, request, requesttype,
+ value, index, data, size, timeout);
+ if (ret < 0) {
+ atusb->err = ret;
+ dev_err(&usb_dev->dev,
+ "atusb_control_msg: req 0x%02x val 0x%x idx 0x%x, error %d\n",
+ request, value, index, ret);
+ }
+ return ret;
+}
+
+static int atusb_command(struct atusb *atusb, uint8_t cmd, uint8_t arg)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+
+ dev_dbg(&usb_dev->dev, "atusb_command: cmd = 0x%x\n", cmd);
+ return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
+ cmd, ATUSB_REQ_TO_DEV, arg, 0, NULL, 0, 1000);
+}
+
+static int atusb_write_reg(struct atusb *atusb, uint8_t reg, uint8_t value)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+
+ dev_dbg(&usb_dev->dev, "atusb_write_reg: 0x%02x <- 0x%02x\n",
+ reg, value);
+ return atusb_control_msg(atusb, usb_sndctrlpipe(usb_dev, 0),
+ ATUSB_REG_WRITE, ATUSB_REQ_TO_DEV,
+ value, reg, NULL, 0, 1000);
+}
+
+static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ int ret;
+ uint8_t value;
+
+ dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
+ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+ ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
+ 0, reg, &value, 1, 1000);
+ return ret >= 0 ? value : ret;
+}
+
+static int atusb_get_and_clear_error(struct atusb *atusb)
+{
+ int err = atusb->err;
+
+ atusb->err = 0;
+ return err;
+}
+
+/* ----- skb allocation ---------------------------------------------------- */
+
+#define MAX_PSDU 127
+#define MAX_RX_XFER (1 + MAX_PSDU + 2 + 1) /* PHR+PSDU+CRC+LQI */
+
+#define SKB_ATUSB(skb) (*(struct atusb **)(skb)->cb)
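+/* Stash the owning struct atusb in the skb control buffer: the URB
+ * completion handler only gets the skb back (via urb->context) and uses
+ * this to recover the driver state.
+ */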
+
+static void atusb_in(struct urb *urb);
+
+static int atusb_submit_rx_urb(struct atusb *atusb, struct urb *urb)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ struct sk_buff *skb = urb->context;
+ int ret;
+
+ if (!skb) {
+ skb = alloc_skb(MAX_RX_XFER, GFP_KERNEL);
+ if (!skb) {
+ dev_warn_ratelimited(&usb_dev->dev,
+ "atusb_in: can't allocate skb\n");
+ return -ENOMEM;
+ }
+ skb_put(skb, MAX_RX_XFER);
+ SKB_ATUSB(skb) = atusb;
+ }
+
+ usb_fill_bulk_urb(urb, usb_dev, usb_rcvbulkpipe(usb_dev, 1),
+ skb->data, MAX_RX_XFER, atusb_in, skb);
+ usb_anchor_urb(urb, &atusb->rx_urbs);
+
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret) {
+ usb_unanchor_urb(urb);
+ kfree_skb(skb);
+ urb->context = NULL;
+ }
+ return ret;
+}
+
+static void atusb_work_urbs(struct work_struct *work)
+{
+ struct atusb *atusb =
+ container_of(to_delayed_work(work), struct atusb, work);
+ struct usb_device *usb_dev = atusb->usb_dev;
+ struct urb *urb;
+ int ret;
+
+ if (atusb->shutdown)
+ return;
+
+ do {
+ urb = usb_get_from_anchor(&atusb->idle_urbs);
+ if (!urb)
+ return;
+ ret = atusb_submit_rx_urb(atusb, urb);
+ } while (!ret);
+
+ usb_anchor_urb(urb, &atusb->idle_urbs);
+ dev_warn_ratelimited(&usb_dev->dev,
+ "atusb_in: can't allocate/submit URB (%d)\n", ret);
+ schedule_delayed_work(&atusb->work,
+ msecs_to_jiffies(ATUSB_ALLOC_DELAY_MS) + 1);
+}
+
+/* ----- Asynchronous USB -------------------------------------------------- */
+
+static void atusb_tx_done(struct atusb *atusb, uint8_t seq)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ uint8_t expect = atusb->tx_ack_seq;
+
+ dev_dbg(&usb_dev->dev, "atusb_tx_done (0x%02x/0x%02x)\n", seq, expect);
+ if (seq == expect) {
+ /* TODO check for ifs handling in firmware */
+ ieee802154_xmit_complete(atusb->hw, atusb->tx_skb, false);
+ } else {
+ /* TODO I have seen this case when atusb raises a tx complete
+ * irq before probing; we should fix the firmware. It is an
+ * unlikely case now, since seq == expect is then true, but it
+ * can happen and would fail with tx_skb == NULL.
+ */
+ ieee802154_wake_queue(atusb->hw);
+ if (atusb->tx_skb)
+ dev_kfree_skb_irq(atusb->tx_skb);
+ }
+}
+
+static void atusb_in_good(struct urb *urb)
+{
+ struct usb_device *usb_dev = urb->dev;
+ struct sk_buff *skb = urb->context;
+ struct atusb *atusb = SKB_ATUSB(skb);
+ uint8_t len, lqi;
+
+ if (!urb->actual_length) {
+ dev_dbg(&usb_dev->dev, "atusb_in: zero-sized URB ?\n");
+ return;
+ }
+
+ len = *skb->data;
+
+ if (urb->actual_length == 1) {
+ atusb_tx_done(atusb, len);
+ return;
+ }
+
+ if (len + 1 > urb->actual_length - 1) {
+ dev_dbg(&usb_dev->dev, "atusb_in: frame len %d+1 > URB %u-1\n",
+ len, urb->actual_length);
+ return;
+ }
+
+ if (!ieee802154_is_valid_psdu_len(len)) {
+ dev_dbg(&usb_dev->dev, "atusb_in: frame corrupted\n");
+ return;
+ }
+
+ lqi = skb->data[len + 1];
+ dev_dbg(&usb_dev->dev, "atusb_in: rx len %d lqi 0x%02x\n", len, lqi);
+ skb_pull(skb, 1); /* remove PHR */
+ skb_trim(skb, len); /* get payload only */
+ ieee802154_rx_irqsafe(atusb->hw, skb, lqi);
+ urb->context = NULL; /* skb is gone */
+}
+
+static void atusb_in(struct urb *urb)
+{
+ struct usb_device *usb_dev = urb->dev;
+ struct sk_buff *skb = urb->context;
+ struct atusb *atusb = SKB_ATUSB(skb);
+
+ dev_dbg(&usb_dev->dev, "atusb_in: status %d len %d\n",
+ urb->status, urb->actual_length);
+ if (urb->status) {
+ if (urb->status == -ENOENT) { /* being killed */
+ kfree_skb(skb);
+ urb->context = NULL;
+ return;
+ }
+ dev_dbg(&usb_dev->dev, "atusb_in: URB error %d\n", urb->status);
+ } else {
+ atusb_in_good(urb);
+ }
+
+ usb_anchor_urb(urb, &atusb->idle_urbs);
+ if (!atusb->shutdown)
+ schedule_delayed_work(&atusb->work, 0);
+}
+
+/* ----- URB allocation/deallocation --------------------------------------- */
+
+static void atusb_free_urbs(struct atusb *atusb)
+{
+ struct urb *urb;
+
+ while (1) {
+ urb = usb_get_from_anchor(&atusb->idle_urbs);
+ if (!urb)
+ break;
+ if (urb->context)
+ kfree_skb(urb->context);
+ usb_free_urb(urb);
+ }
+}
+
+static int atusb_alloc_urbs(struct atusb *atusb, int n)
+{
+ struct urb *urb;
+
+ while (n) {
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb) {
+ atusb_free_urbs(atusb);
+ return -ENOMEM;
+ }
+ usb_anchor_urb(urb, &atusb->idle_urbs);
+ n--;
+ }
+ return 0;
+}
+
+/* ----- IEEE 802.15.4 interface operations -------------------------------- */
+
+static void atusb_xmit_complete(struct urb *urb)
+{
+ dev_dbg(&urb->dev->dev, "atusb_xmit urb completed");
+}
+
+static int atusb_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
+{
+ struct atusb *atusb = hw->priv;
+ struct usb_device *usb_dev = atusb->usb_dev;
+ int ret;
+
+ dev_dbg(&usb_dev->dev, "atusb_xmit (%d)\n", skb->len);
+ atusb->tx_skb = skb;
+ atusb->tx_ack_seq++;
+ atusb->tx_dr.wIndex = cpu_to_le16(atusb->tx_ack_seq);
+ atusb->tx_dr.wLength = cpu_to_le16(skb->len);
+
+ usb_fill_control_urb(atusb->tx_urb, usb_dev,
+ usb_sndctrlpipe(usb_dev, 0),
+ (unsigned char *)&atusb->tx_dr, skb->data,
+ skb->len, atusb_xmit_complete, NULL);
+ ret = usb_submit_urb(atusb->tx_urb, GFP_ATOMIC);
+ dev_dbg(&usb_dev->dev, "atusb_xmit done (%d)\n", ret);
+ return ret;
+}
+
+static int atusb_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+{
+ struct atusb *atusb = hw->priv;
+ int ret;
+
+ /* This implicitly sets the CCA (Clear Channel Assessment) mode to 0,
+ * "Mode 3a, Carrier sense OR energy above threshold".
+ * We should probably make this configurable. @@@
+ */
+ ret = atusb_write_reg(atusb, RG_PHY_CC_CCA, channel);
+ if (ret < 0)
+ return ret;
+ msleep(1); /* @@@ ugly synchronization */
+ return 0;
+}
+
+static int atusb_ed(struct ieee802154_hw *hw, u8 *level)
+{
+ BUG_ON(!level);
+ *level = 0xbe;
+ return 0;
+}
+
+static int atusb_set_hw_addr_filt(struct ieee802154_hw *hw,
+ struct ieee802154_hw_addr_filt *filt,
+ unsigned long changed)
+{
+ struct atusb *atusb = hw->priv;
+ struct device *dev = &atusb->usb_dev->dev;
+ uint8_t reg;
+
+ if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
+ u16 addr = le16_to_cpu(filt->short_addr);
+
+ dev_vdbg(dev, "atusb_set_hw_addr_filt called for saddr\n");
+ atusb_write_reg(atusb, RG_SHORT_ADDR_0, addr);
+ atusb_write_reg(atusb, RG_SHORT_ADDR_1, addr >> 8);
+ }
+
+ if (changed & IEEE802154_AFILT_PANID_CHANGED) {
+ u16 pan = le16_to_cpu(filt->pan_id);
+
+ dev_vdbg(dev, "atusb_set_hw_addr_filt called for pan id\n");
+ atusb_write_reg(atusb, RG_PAN_ID_0, pan);
+ atusb_write_reg(atusb, RG_PAN_ID_1, pan >> 8);
+ }
+
+ if (changed & IEEE802154_AFILT_IEEEADDR_CHANGED) {
+ u8 i, addr[IEEE802154_EXTENDED_ADDR_LEN];
+
+ memcpy(addr, &filt->ieee_addr, IEEE802154_EXTENDED_ADDR_LEN);
+ dev_vdbg(dev, "atusb_set_hw_addr_filt called for IEEE addr\n");
+ for (i = 0; i < 8; i++)
+ atusb_write_reg(atusb, RG_IEEE_ADDR_0 + i, addr[i]);
+ }
+
+ if (changed & IEEE802154_AFILT_PANC_CHANGED) {
+ dev_vdbg(dev,
+ "atusb_set_hw_addr_filt called for panc change\n");
+ reg = atusb_read_reg(atusb, SR_REG(SR_AACK_I_AM_COORD));
+ if (filt->pan_coord)
+ reg |= SR_VALUE(SR_AACK_I_AM_COORD, 1);
+ else
+ reg &= ~SR_VALUE(SR_AACK_I_AM_COORD, 1);
+ atusb_write_reg(atusb, SR_REG(SR_AACK_I_AM_COORD), reg);
+ }
+
+ return atusb_get_and_clear_error(atusb);
+}
+
+static int atusb_start(struct ieee802154_hw *hw)
+{
+ struct atusb *atusb = hw->priv;
+ struct usb_device *usb_dev = atusb->usb_dev;
+ int ret;
+
+ dev_dbg(&usb_dev->dev, "atusb_start\n");
+ schedule_delayed_work(&atusb->work, 0);
+ atusb_command(atusb, ATUSB_RX_MODE, 1);
+ ret = atusb_get_and_clear_error(atusb);
+ if (ret < 0)
+ usb_kill_anchored_urbs(&atusb->idle_urbs);
+ return ret;
+}
+
+static void atusb_stop(struct ieee802154_hw *hw)
+{
+ struct atusb *atusb = hw->priv;
+ struct usb_device *usb_dev = atusb->usb_dev;
+
+ dev_dbg(&usb_dev->dev, "atusb_stop\n");
+ usb_kill_anchored_urbs(&atusb->idle_urbs);
+ atusb_command(atusb, ATUSB_RX_MODE, 0);
+ atusb_get_and_clear_error(atusb);
+}
+
+static struct ieee802154_ops atusb_ops = {
+ .owner = THIS_MODULE,
+ .xmit_async = atusb_xmit,
+ .ed = atusb_ed,
+ .set_channel = atusb_channel,
+ .start = atusb_start,
+ .stop = atusb_stop,
+ .set_hw_addr_filt = atusb_set_hw_addr_filt,
+};
+
+/* ----- Firmware and chip version information ----------------------------- */
+
+static int atusb_get_and_show_revision(struct atusb *atusb)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ unsigned char buffer[3];
+ int ret;
+
+ /* Get a couple of the ATMega Firmware values */
+ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+ ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
+ buffer, 3, 1000);
+ if (ret >= 0) {
+ dev_info(&usb_dev->dev,
+ "Firmware: major: %u, minor: %u, hardware type: %u\n",
+ buffer[0], buffer[1], buffer[2]);
+ if (buffer[0] == 0 && buffer[1] < 2) {
+ dev_info(&usb_dev->dev,
+ "Firmware version (%u.%u) predates our first public release\n",
+ buffer[0], buffer[1]);
+ dev_info(&usb_dev->dev,
+ "Please update to version 0.2 or newer\n");
+ }
+ }
+
+ return ret;
+}
+
+static int atusb_get_and_show_build(struct atusb *atusb)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ char build[ATUSB_BUILD_SIZE + 1];
+ int ret;
+
+ ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
+ ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
+ build, ATUSB_BUILD_SIZE, 1000);
+ if (ret >= 0) {
+ build[ret] = 0;
+ dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
+ }
+
+ return ret;
+}
+
+static int atusb_get_and_show_chip(struct atusb *atusb)
+{
+ struct usb_device *usb_dev = atusb->usb_dev;
+ uint8_t man_id_0, man_id_1, part_num, version_num;
+
+ man_id_0 = atusb_read_reg(atusb, RG_MAN_ID_0);
+ man_id_1 = atusb_read_reg(atusb, RG_MAN_ID_1);
+ part_num = atusb_read_reg(atusb, RG_PART_NUM);
+ version_num = atusb_read_reg(atusb, RG_VERSION_NUM);
+
+ if (atusb->err)
+ return atusb->err;
+
+ if ((man_id_1 << 8 | man_id_0) != ATUSB_JEDEC_ATMEL) {
+ dev_err(&usb_dev->dev,
+ "non-Atmel transceiver xxxx%02x%02x\n",
+ man_id_1, man_id_0);
+ goto fail;
+ }
+ if (part_num != 3 && part_num != 2) {
+ dev_err(&usb_dev->dev,
+ "unexpected transceiver, part 0x%02x version 0x%02x\n",
+ part_num, version_num);
+ goto fail;
+ }
+
+ dev_info(&usb_dev->dev, "ATUSB: AT86RF231 version %d\n", version_num);
+
+ return 0;
+
+fail:
+ atusb->err = -ENODEV;
+ return -ENODEV;
+}
+
+/* ----- Setup ------------------------------------------------------------- */
+
+static int atusb_probe(struct usb_interface *interface,
+ const struct usb_device_id *id)
+{
+ struct usb_device *usb_dev = interface_to_usbdev(interface);
+ struct ieee802154_hw *hw;
+ struct atusb *atusb = NULL;
+ int ret = -ENOMEM;
+
+ hw = ieee802154_alloc_hw(sizeof(struct atusb), &atusb_ops);
+ if (!hw)
+ return -ENOMEM;
+
+ atusb = hw->priv;
+ atusb->hw = hw;
+ atusb->usb_dev = usb_get_dev(usb_dev);
+ usb_set_intfdata(interface, atusb);
+
+ atusb->shutdown = 0;
+ atusb->err = 0;
+ INIT_DELAYED_WORK(&atusb->work, atusb_work_urbs);
+ init_usb_anchor(&atusb->idle_urbs);
+ init_usb_anchor(&atusb->rx_urbs);
+
+ if (atusb_alloc_urbs(atusb, ATUSB_NUM_RX_URBS))
+ goto fail;
+
+ atusb->tx_dr.bRequestType = ATUSB_REQ_TO_DEV;
+ atusb->tx_dr.bRequest = ATUSB_TX;
+ atusb->tx_dr.wValue = cpu_to_le16(0);
+
+ atusb->tx_urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!atusb->tx_urb)
+ goto fail;
+
+ hw->parent = &usb_dev->dev;
+ hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
+ IEEE802154_HW_AACK;
+
+ hw->phy->current_page = 0;
+ hw->phy->current_channel = 11; /* reset default */
+ hw->phy->supported.channels[0] = 0x7FFF800;
+ ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+
+ atusb_command(atusb, ATUSB_RF_RESET, 0);
+ atusb_get_and_show_chip(atusb);
+ atusb_get_and_show_revision(atusb);
+ atusb_get_and_show_build(atusb);
+ ret = atusb_get_and_clear_error(atusb);
+ if (ret) {
+ dev_err(&atusb->usb_dev->dev,
+ "%s: initialization failed, error = %d\n",
+ __func__, ret);
+ goto fail;
+ }
+
+ ret = ieee802154_register_hw(hw);
+ if (ret)
+ goto fail;
+
+ /* If we just powered on, we're now in P_ON and need to enter TRX_OFF
+ * explicitly. Any resets after that will send us straight to TRX_OFF,
+ * making the command below redundant.
+ */
+ atusb_write_reg(atusb, RG_TRX_STATE, STATE_FORCE_TRX_OFF);
+ msleep(1); /* reset => TRX_OFF, tTR13 = 37 us */
+
+#if 0
+ /* Calculating the maximum time available to empty the frame buffer
+ * on reception:
+ *
+ * According to [1], the inter-frame gap is
+ * R * 20 * 16 us + 128 us
+ * where R is a random number from 0 to 7. Furthermore, we have 20 bit
+ * times (80 us at 250 kbps) of SHR of the next frame before the
+ * transceiver begins storing data in the frame buffer.
+ *
+ * This yields a minimum time of 208 us (the 128 us inter-frame gap at
+ * R = 0 plus the 80 us SHR) between the last data of a frame and the
+ * first data of the next frame. This time is further
+ * reduced by interrupt latency in the atusb firmware.
+ *
+ * atusb currently needs about 500 us to retrieve a maximum-sized
+ * frame. We therefore have to allow reception of a new frame to begin
+ * while we retrieve the previous frame.
+ *
+ * [1] "JN-AN-1035 Calculating data rates in an IEEE 802.15.4-based
+ * network", Jennic 2006.
+ * http://www.jennic.com/download_file.php?supportFile=JN-AN-1035%20Calculating%20802-15-4%20Data%20Rates-1v0.pdf
+ */
+
+ atusb_write_reg(atusb,
+ SR_REG(SR_RX_SAFE_MODE), SR_VALUE(SR_RX_SAFE_MODE, 1));
+#endif
+ atusb_write_reg(atusb, RG_IRQ_MASK, 0xff);
+
+ ret = atusb_get_and_clear_error(atusb);
+ if (!ret)
+ return 0;
+
+ dev_err(&atusb->usb_dev->dev,
+ "%s: setup failed, error = %d\n",
+ __func__, ret);
+
+ ieee802154_unregister_hw(hw);
+fail:
+ atusb_free_urbs(atusb);
+ usb_kill_urb(atusb->tx_urb);
+ usb_free_urb(atusb->tx_urb);
+ usb_put_dev(usb_dev);
+ ieee802154_free_hw(hw);
+ return ret;
+}
+
+static void atusb_disconnect(struct usb_interface *interface)
+{
+ struct atusb *atusb = usb_get_intfdata(interface);
+
+ dev_dbg(&atusb->usb_dev->dev, "atusb_disconnect\n");
+
+ atusb->shutdown = 1;
+ cancel_delayed_work_sync(&atusb->work);
+
+ usb_kill_anchored_urbs(&atusb->rx_urbs);
+ atusb_free_urbs(atusb);
+ usb_kill_urb(atusb->tx_urb);
+ usb_free_urb(atusb->tx_urb);
+
+ ieee802154_unregister_hw(atusb->hw);
+
+ ieee802154_free_hw(atusb->hw);
+
+ usb_set_intfdata(interface, NULL);
+ usb_put_dev(atusb->usb_dev);
+
+ pr_debug("atusb_disconnect done\n");
+}
+
+/* The devices we work with */
+static const struct usb_device_id atusb_device_table[] = {
+ {
+ .match_flags = USB_DEVICE_ID_MATCH_DEVICE |
+ USB_DEVICE_ID_MATCH_INT_INFO,
+ .idVendor = ATUSB_VENDOR_ID,
+ .idProduct = ATUSB_PRODUCT_ID,
+ .bInterfaceClass = USB_CLASS_VENDOR_SPEC
+ },
+ /* end with null element */
+ {}
+};
+MODULE_DEVICE_TABLE(usb, atusb_device_table);
+
+static struct usb_driver atusb_driver = {
+ .name = "atusb",
+ .probe = atusb_probe,
+ .disconnect = atusb_disconnect,
+ .id_table = atusb_device_table,
+};
+module_usb_driver(atusb_driver);
+
+MODULE_AUTHOR("Alexander Aring <alex.aring@gmail.com>");
+MODULE_AUTHOR("Richard Sharpe <realrichardsharpe@gmail.com>");
+MODULE_AUTHOR("Stefan Schmidt <stefan@datenfreihafen.org>");
+MODULE_AUTHOR("Werner Almesberger <werner@almesberger.net>");
+MODULE_DESCRIPTION("ATUSB IEEE 802.15.4 Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ieee802154/atusb.h b/drivers/net/ieee802154/atusb.h
new file mode 100644
index 0000000..0690edc
--- /dev/null
+++ b/drivers/net/ieee802154/atusb.h
@@ -0,0 +1,84 @@
+/*
+ * atusb.h - Definitions shared between kernel and ATUSB firmware
+ *
+ * Written 2013 by Werner Almesberger <werner@almesberger.net>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2, or
+ * (at your option) any later version.
+ *
+ * This file should be identical for kernel and firmware.
+ * Kernel: drivers/net/ieee802154/atusb.h
+ * Firmware: ben-wpan/atusb/fw/include/atusb/atusb.h
+ */
+
+#ifndef _ATUSB_H
+#define _ATUSB_H
+
+#define ATUSB_VENDOR_ID 0x20b7 /* Qi Hardware */
+#define ATUSB_PRODUCT_ID 0x1540 /* 802.15.4, device 0 */
+ /* -- - - */
+
+#define ATUSB_BUILD_SIZE 256 /* maximum build version/date message length */
+
+/* Commands to our device. Make sure this is synced with the firmware */
+enum atusb_requests {
+ ATUSB_ID = 0x00, /* system status/control grp */
+ ATUSB_BUILD,
+ ATUSB_RESET,
+ ATUSB_RF_RESET = 0x10, /* debug/test group */
+ ATUSB_POLL_INT,
+ ATUSB_TEST, /* atusb-sil only */
+ ATUSB_TIMER,
+ ATUSB_GPIO,
+ ATUSB_SLP_TR,
+ ATUSB_GPIO_CLEANUP,
+ ATUSB_REG_WRITE = 0x20, /* transceiver group */
+ ATUSB_REG_READ,
+ ATUSB_BUF_WRITE,
+ ATUSB_BUF_READ,
+ ATUSB_SRAM_WRITE,
+ ATUSB_SRAM_READ,
+ ATUSB_SPI_WRITE = 0x30, /* SPI group */
+ ATUSB_SPI_READ1,
+ ATUSB_SPI_READ2,
+ ATUSB_SPI_WRITE2_SYNC,
+ ATUSB_RX_MODE = 0x40, /* HardMAC group */
+ ATUSB_TX,
+};
+
+/* Direction bRequest wValue wIndex wLength
+ *
+ * ->host ATUSB_ID - - 3
+ * ->host ATUSB_BUILD - - #bytes
+ * host-> ATUSB_RESET - - 0
+ *
+ * host-> ATUSB_RF_RESET - - 0
+ * ->host ATUSB_POLL_INT - - 1
+ * host-> ATUSB_TEST - - 0
+ * ->host ATUSB_TIMER - - #bytes (6)
+ * ->host ATUSB_GPIO dir+data mask+p# 3
+ * host-> ATUSB_SLP_TR - - 0
+ * host-> ATUSB_GPIO_CLEANUP - - 0
+ *
+ * host-> ATUSB_REG_WRITE value addr 0
+ * ->host ATUSB_REG_READ - addr 1
+ * host-> ATUSB_BUF_WRITE - - #bytes
+ * ->host ATUSB_BUF_READ - - #bytes
+ * host-> ATUSB_SRAM_WRITE - addr #bytes
+ * ->host ATUSB_SRAM_READ - addr #bytes
+ *
+ * host-> ATUSB_SPI_WRITE byte0 byte1 #bytes
+ * ->host ATUSB_SPI_READ1 byte0 - #bytes
+ * ->host ATUSB_SPI_READ2 byte0 byte1 #bytes
+ * ->host ATUSB_SPI_WRITE2_SYNC byte0 byte1 0/1
+ *
+ * host-> ATUSB_RX_MODE on - 0
+ * host-> ATUSB_TX flags ack_seq #bytes
+ */
+
+#define ATUSB_REQ_FROM_DEV (USB_TYPE_VENDOR | USB_DIR_IN)
+#define ATUSB_REQ_TO_DEV (USB_TYPE_VENDOR | USB_DIR_OUT)
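+
+/* Host-side example (cf. atusb_read_reg() in atusb.c): reading transceiver
+ * register RG_PART_NUM (0x1c) becomes
+ * usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ATUSB_REG_READ,
+ * ATUSB_REQ_FROM_DEV, 0, 0x1c, &value, 1, timeout);
+ */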
+
+#endif /* !_ATUSB_H */
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
index f833b8b..84b28a0 100644
--- a/drivers/net/ieee802154/cc2520.c
+++ b/drivers/net/ieee802154/cc2520.c
@@ -653,7 +653,7 @@ static int cc2520_register(struct cc2520_private *priv)
ieee802154_random_extended_addr(&priv->hw->phy->perm_extended_addr);
/* We support only 2.4 GHz */
- priv->hw->phy->channels_supported[0] = 0x7FFF800;
+ priv->hw->phy->supported.channels[0] = 0x7FFF800;
priv->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
IEEE802154_HW_AFILT;
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index dc2bfb6..9d0da4e 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -27,25 +27,25 @@
#include <net/mac802154.h>
#include <net/cfg802154.h>
-static int numlbs = 1;
+static int numlbs = 2;
-struct fakelb_dev_priv {
- struct ieee802154_hw *hw;
+static LIST_HEAD(fakelb_phys);
+static DEFINE_SPINLOCK(fakelb_phys_lock);
- struct list_head list;
- struct fakelb_priv *fake;
+static LIST_HEAD(fakelb_ifup_phys);
+static DEFINE_RWLOCK(fakelb_ifup_phys_lock);
- spinlock_t lock;
- bool working;
-};
+struct fakelb_phy {
+ struct ieee802154_hw *hw;
+
+ u8 page;
+ u8 channel;
-struct fakelb_priv {
struct list_head list;
- rwlock_t lock;
+ struct list_head list_ifup;
};
-static int
-fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
+static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
{
BUG_ON(!level);
*level = 0xbe;
@@ -53,78 +53,63 @@ fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
return 0;
}
-static int
-fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
+static int fakelb_hw_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
{
- pr_debug("set channel to %d\n", channel);
+ struct fakelb_phy *phy = hw->priv;
+ write_lock_bh(&fakelb_ifup_phys_lock);
+ phy->page = page;
+ phy->channel = channel;
+ write_unlock_bh(&fakelb_ifup_phys_lock);
return 0;
}
-static void
-fakelb_hw_deliver(struct fakelb_dev_priv *priv, struct sk_buff *skb)
+static int fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
{
- struct sk_buff *newskb;
+ struct fakelb_phy *current_phy = hw->priv, *phy;
- spin_lock(&priv->lock);
- if (priv->working) {
- newskb = pskb_copy(skb, GFP_ATOMIC);
- ieee802154_rx_irqsafe(priv->hw, newskb, 0xcc);
- }
- spin_unlock(&priv->lock);
-}
+ read_lock_bh(&fakelb_ifup_phys_lock);
+ list_for_each_entry(phy, &fakelb_ifup_phys, list_ifup) {
+ if (current_phy == phy)
+ continue;
-static int
-fakelb_hw_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
-{
- struct fakelb_dev_priv *priv = hw->priv;
- struct fakelb_priv *fake = priv->fake;
-
- read_lock_bh(&fake->lock);
- if (priv->list.next == priv->list.prev) {
- /* we are the only one device */
- fakelb_hw_deliver(priv, skb);
- } else {
- struct fakelb_dev_priv *dp;
- list_for_each_entry(dp, &priv->fake->list, list) {
- if (dp != priv &&
- (dp->hw->phy->current_channel ==
- priv->hw->phy->current_channel))
- fakelb_hw_deliver(dp, skb);
+ if (current_phy->page == phy->page &&
+ current_phy->channel == phy->channel) {
+ struct sk_buff *newskb = pskb_copy(skb, GFP_ATOMIC);
+
+ if (newskb)
+ ieee802154_rx_irqsafe(phy->hw, newskb, 0xcc);
}
}
- read_unlock_bh(&fake->lock);
+ read_unlock_bh(&fakelb_ifup_phys_lock);
+ ieee802154_xmit_complete(hw, skb, false);
return 0;
}
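+
+/* Note that a transmitted frame is looped back to every other interface-up
+ * phy on the same page and channel, then completed immediately; no real
+ * radio is involved.
+ */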
-static int
-fakelb_hw_start(struct ieee802154_hw *hw) {
- struct fakelb_dev_priv *priv = hw->priv;
- int ret = 0;
+static int fakelb_hw_start(struct ieee802154_hw *hw)
+{
+ struct fakelb_phy *phy = hw->priv;
- spin_lock(&priv->lock);
- if (priv->working)
- ret = -EBUSY;
- else
- priv->working = 1;
- spin_unlock(&priv->lock);
+ write_lock_bh(&fakelb_ifup_phys_lock);
+ list_add(&phy->list_ifup, &fakelb_ifup_phys);
+ write_unlock_bh(&fakelb_ifup_phys_lock);
- return ret;
+ return 0;
}
-static void
-fakelb_hw_stop(struct ieee802154_hw *hw) {
- struct fakelb_dev_priv *priv = hw->priv;
+static void fakelb_hw_stop(struct ieee802154_hw *hw)
+{
+ struct fakelb_phy *phy = hw->priv;
- spin_lock(&priv->lock);
- priv->working = 0;
- spin_unlock(&priv->lock);
+ write_lock_bh(&fakelb_ifup_phys_lock);
+ list_del(&phy->list_ifup);
+ write_unlock_bh(&fakelb_ifup_phys_lock);
}
static const struct ieee802154_ops fakelb_ops = {
.owner = THIS_MODULE,
- .xmit_sync = fakelb_hw_xmit,
+ .xmit_async = fakelb_hw_xmit,
.ed = fakelb_hw_ed,
.set_channel = fakelb_hw_channel,
.start = fakelb_hw_start,
@@ -135,54 +120,54 @@ static const struct ieee802154_ops fakelb_ops = {
module_param(numlbs, int, 0);
MODULE_PARM_DESC(numlbs, " number of pseudo devices");
-static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
+static int fakelb_add_one(struct device *dev)
{
- struct fakelb_dev_priv *priv;
- int err;
struct ieee802154_hw *hw;
+ struct fakelb_phy *phy;
+ int err;
- hw = ieee802154_alloc_hw(sizeof(*priv), &fakelb_ops);
+ hw = ieee802154_alloc_hw(sizeof(*phy), &fakelb_ops);
if (!hw)
return -ENOMEM;
- priv = hw->priv;
- priv->hw = hw;
+ phy = hw->priv;
+ phy->hw = hw;
/* 868 MHz BPSK 802.15.4-2003 */
- hw->phy->channels_supported[0] |= 1;
+ hw->phy->supported.channels[0] |= 1;
/* 915 MHz BPSK 802.15.4-2003 */
- hw->phy->channels_supported[0] |= 0x7fe;
+ hw->phy->supported.channels[0] |= 0x7fe;
/* 2.4 GHz O-QPSK 802.15.4-2003 */
- hw->phy->channels_supported[0] |= 0x7FFF800;
+ hw->phy->supported.channels[0] |= 0x7FFF800;
/* 868 MHz ASK 802.15.4-2006 */
- hw->phy->channels_supported[1] |= 1;
+ hw->phy->supported.channels[1] |= 1;
/* 915 MHz ASK 802.15.4-2006 */
- hw->phy->channels_supported[1] |= 0x7fe;
+ hw->phy->supported.channels[1] |= 0x7fe;
/* 868 MHz O-QPSK 802.15.4-2006 */
- hw->phy->channels_supported[2] |= 1;
+ hw->phy->supported.channels[2] |= 1;
/* 915 MHz O-QPSK 802.15.4-2006 */
- hw->phy->channels_supported[2] |= 0x7fe;
+ hw->phy->supported.channels[2] |= 0x7fe;
/* 2.4 GHz CSS 802.15.4a-2007 */
- hw->phy->channels_supported[3] |= 0x3fff;
+ hw->phy->supported.channels[3] |= 0x3fff;
/* UWB Sub-gigahertz 802.15.4a-2007 */
- hw->phy->channels_supported[4] |= 1;
+ hw->phy->supported.channels[4] |= 1;
/* UWB Low band 802.15.4a-2007 */
- hw->phy->channels_supported[4] |= 0x1e;
+ hw->phy->supported.channels[4] |= 0x1e;
/* UWB High band 802.15.4a-2007 */
- hw->phy->channels_supported[4] |= 0xffe0;
+ hw->phy->supported.channels[4] |= 0xffe0;
/* 750 MHz O-QPSK 802.15.4c-2009 */
- hw->phy->channels_supported[5] |= 0xf;
+ hw->phy->supported.channels[5] |= 0xf;
/* 750 MHz MPSK 802.15.4c-2009 */
- hw->phy->channels_supported[5] |= 0xf0;
+ hw->phy->supported.channels[5] |= 0xf0;
/* 950 MHz BPSK 802.15.4d-2009 */
- hw->phy->channels_supported[6] |= 0x3ff;
+ hw->phy->supported.channels[6] |= 0x3ff;
/* 950 MHz GFSK 802.15.4d-2009 */
- hw->phy->channels_supported[6] |= 0x3ffc00;
+ hw->phy->supported.channels[6] |= 0x3ffc00;
- INIT_LIST_HEAD(&priv->list);
- priv->fake = fake;
-
- spin_lock_init(&priv->lock);
+ ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
+ /* fake phy channel 13 as default */
+ hw->phy->current_channel = 13;
+ phy->channel = hw->phy->current_channel;
hw->parent = dev;
@@ -190,67 +175,55 @@ static int fakelb_add_one(struct device *dev, struct fakelb_priv *fake)
if (err)
goto err_reg;
- write_lock_bh(&fake->lock);
- list_add_tail(&priv->list, &fake->list);
- write_unlock_bh(&fake->lock);
+ spin_lock(&fakelb_phys_lock);
+ list_add_tail(&phy->list, &fakelb_phys);
+ spin_unlock(&fakelb_phys_lock);
return 0;
err_reg:
- ieee802154_free_hw(priv->hw);
+ ieee802154_free_hw(phy->hw);
return err;
}
-static void fakelb_del(struct fakelb_dev_priv *priv)
+static void fakelb_del(struct fakelb_phy *phy)
{
- write_lock_bh(&priv->fake->lock);
- list_del(&priv->list);
- write_unlock_bh(&priv->fake->lock);
+ list_del(&phy->list);
- ieee802154_unregister_hw(priv->hw);
- ieee802154_free_hw(priv->hw);
+ ieee802154_unregister_hw(phy->hw);
+ ieee802154_free_hw(phy->hw);
}
static int fakelb_probe(struct platform_device *pdev)
{
- struct fakelb_priv *priv;
- struct fakelb_dev_priv *dp;
- int err = -ENOMEM;
- int i;
-
- priv = devm_kzalloc(&pdev->dev, sizeof(struct fakelb_priv),
- GFP_KERNEL);
- if (!priv)
- goto err_alloc;
-
- INIT_LIST_HEAD(&priv->list);
- rwlock_init(&priv->lock);
+ struct fakelb_phy *phy, *tmp;
+ int err, i;
for (i = 0; i < numlbs; i++) {
- err = fakelb_add_one(&pdev->dev, priv);
+ err = fakelb_add_one(&pdev->dev);
if (err < 0)
goto err_slave;
}
- platform_set_drvdata(pdev, priv);
dev_info(&pdev->dev, "added ieee802154 hardware\n");
return 0;
err_slave:
- list_for_each_entry(dp, &priv->list, list)
- fakelb_del(dp);
-err_alloc:
+ spin_lock(&fakelb_phys_lock);
+ list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
+ fakelb_del(phy);
+ spin_unlock(&fakelb_phys_lock);
return err;
}
static int fakelb_remove(struct platform_device *pdev)
{
- struct fakelb_priv *priv = platform_get_drvdata(pdev);
- struct fakelb_dev_priv *dp, *temp;
-
- list_for_each_entry_safe(dp, temp, &priv->list, list)
- fakelb_del(dp);
+ struct fakelb_phy *phy, *tmp;
+ spin_lock(&fakelb_phys_lock);
+ list_for_each_entry_safe(phy, tmp, &fakelb_phys, list)
+ fakelb_del(phy);
+ spin_unlock(&fakelb_phys_lock);
return 0;
}
diff --git a/drivers/net/ieee802154/mrf24j40.c b/drivers/net/ieee802154/mrf24j40.c
index fba2dfd..f2a1bd1 100644
--- a/drivers/net/ieee802154/mrf24j40.c
+++ b/drivers/net/ieee802154/mrf24j40.c
@@ -750,7 +750,7 @@ static int mrf24j40_probe(struct spi_device *spi)
devrec->hw->priv = devrec;
devrec->hw->parent = &devrec->spi->dev;
- devrec->hw->phy->channels_supported[0] = CHANNEL_MASK;
+ devrec->hw->phy->supported.channels[0] = CHANNEL_MASK;
devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
IEEE802154_HW_AFILT;
diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index f6c9163..25f2196 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -848,7 +848,9 @@ static void irda_usb_receive(struct urb *urb)
* Jean II */
self->rx_defer_timer.function = irda_usb_rx_defer_expired;
self->rx_defer_timer.data = (unsigned long) urb;
- mod_timer(&self->rx_defer_timer, jiffies + (10 * HZ / 1000));
+ mod_timer(&self->rx_defer_timer,
+ jiffies + msecs_to_jiffies(10));
+
return;
}
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 7c0cb87..cf18940 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -112,6 +112,11 @@ config MICREL_PHY
---help---
Supports the KSZ9021, VSC8201, KS8001 PHYs.
+config DP83867_PHY
+ tristate "Drivers for Texas Instruments DP83867 Gigabit PHY"
+ ---help---
+ Currently supports the DP83867 PHY.
+
config FIXED_PHY
tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs"
depends on PHYLIB
@@ -205,7 +210,6 @@ config MDIO_BCM_UNIMAC
This hardware can be found in the Broadcom GENET Ethernet MAC
controllers as well as some Broadcom Ethernet switches such as the
Starfighter 2 switches.
-
endif # PHYLIB
config MICREL_KS8995MA
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index e97e7f9..fcc25a0 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o
obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o
obj-$(CONFIG_NATIONAL_PHY) += national.o
obj-$(CONFIG_DP83640_PHY) += dp83640.o
+obj-$(CONFIG_DP83867_PHY) += dp83867.o
obj-$(CONFIG_STE10XP) += ste10Xp.o
obj-$(CONFIG_MICREL_PHY) += micrel.o
obj-$(CONFIG_MDIO_OCTEON) += mdio-octeon.o
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 64c74c6..b5dc59d 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -404,7 +404,7 @@ static struct phy_driver bcm7xxx_driver[] = {
.name = "Broadcom BCM7425",
.features = PHY_GBIT_FEATURES |
SUPPORTED_Pause | SUPPORTED_Asym_Pause,
- .flags = 0,
+ .flags = PHY_IS_INTERNAL,
.config_init = bcm7xxx_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 496e02f..00cb41e 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -47,7 +47,7 @@
#define PSF_TX 0x1000
#define EXT_EVENT 1
#define CAL_EVENT 7
-#define CAL_TRIGGER 7
+#define CAL_TRIGGER 1
#define DP83640_N_PINS 12
#define MII_DP83640_MICR 0x11
@@ -496,7 +496,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
else
evnt |= EVNT_RISE;
}
+ mutex_lock(&clock->extreg_lock);
ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
+ mutex_unlock(&clock->extreg_lock);
return 0;
case PTP_CLK_REQ_PEROUT:
@@ -532,6 +534,8 @@ static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
static void enable_status_frames(struct phy_device *phydev, bool on)
{
+ struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_clock *clock = dp83640->clock;
u16 cfg0 = 0, ver;
if (on)
@@ -539,9 +543,13 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
+ mutex_lock(&clock->extreg_lock);
+
ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
+ mutex_unlock(&clock->extreg_lock);
+
if (!phydev->attached_dev) {
pr_warn("expected to find an attached netdevice\n");
return;
@@ -838,7 +846,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
list_del_init(&rxts->list);
phy2rxts(phy_rxts, rxts);
- spin_lock_irqsave(&dp83640->rx_queue.lock, flags);
+ spin_lock(&dp83640->rx_queue.lock);
skb_queue_walk(&dp83640->rx_queue, skb) {
struct dp83640_skb_info *skb_info;
@@ -853,7 +861,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
break;
}
}
- spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags);
+ spin_unlock(&dp83640->rx_queue.lock);
if (!shhwtstamps)
list_add_tail(&rxts->list, &dp83640->rxts);
@@ -1173,11 +1181,18 @@ static int dp83640_config_init(struct phy_device *phydev)
if (clock->chosen && !list_empty(&clock->phylist))
recalibrate(clock);
- else
+ else {
+ mutex_lock(&clock->extreg_lock);
enable_broadcast(phydev, clock->page, 1);
+ mutex_unlock(&clock->extreg_lock);
+ }
enable_status_frames(phydev, true);
+
+ mutex_lock(&clock->extreg_lock);
ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
+ mutex_unlock(&clock->extreg_lock);
+
return 0;
}
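The mutex additions above all guard the same pattern: the DP83640's extended
registers are reached by a page-select write followed by the register access
itself, and PHYs sharing one dp83640_clock must not interleave those two MDIO
transactions. A minimal sketch of the serialized form, assuming only the
ext_write() signature already visible in this file (the wrapper name is
illustrative, not part of the driver):

	static void ext_write_serialized(struct dp83640_clock *clock,
					 struct phy_device *phydev,
					 int page, u32 regnum, u16 val)
	{
		/* page select + register write must appear atomic on the bus */
		mutex_lock(&clock->extreg_lock);
		ext_write(0, phydev, page, regnum, val);
		mutex_unlock(&clock->extreg_lock);
	}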
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
new file mode 100644
index 0000000..ef0b4eb
--- /dev/null
+++ b/drivers/net/phy/dp83867.c
@@ -0,0 +1,239 @@
+/*
+ * Driver for the Texas Instruments DP83867 PHY
+ *
+ * Copyright (C) 2015 Texas Instruments Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/ethtool.h>
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/phy.h>
+
+#include <dt-bindings/net/ti-dp83867.h>
+
+#define DP83867_PHY_ID 0x2000a231
+#define DP83867_DEVADDR 0x1f
+
+#define MII_DP83867_PHYCTRL 0x10
+#define MII_DP83867_MICR 0x12
+#define MII_DP83867_ISR 0x13
+#define DP83867_CTRL 0x1f
+
+/* Extended Registers */
+#define DP83867_RGMIICTL 0x0032
+#define DP83867_RGMIIDCTL 0x0086
+
+#define DP83867_SW_RESET BIT(15)
+#define DP83867_SW_RESTART BIT(14)
+
+/* MICR Interrupt bits */
+#define MII_DP83867_MICR_AN_ERR_INT_EN BIT(15)
+#define MII_DP83867_MICR_SPEED_CHNG_INT_EN BIT(14)
+#define MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN BIT(13)
+#define MII_DP83867_MICR_PAGE_RXD_INT_EN BIT(12)
+#define MII_DP83867_MICR_AUTONEG_COMP_INT_EN BIT(11)
+#define MII_DP83867_MICR_LINK_STS_CHNG_INT_EN BIT(10)
+#define MII_DP83867_MICR_FALSE_CARRIER_INT_EN BIT(8)
+#define MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN BIT(4)
+#define MII_DP83867_MICR_WOL_INT_EN BIT(3)
+#define MII_DP83867_MICR_XGMII_ERR_INT_EN BIT(2)
+#define MII_DP83867_MICR_POL_CHNG_INT_EN BIT(1)
+#define MII_DP83867_MICR_JABBER_INT_EN BIT(0)
+
+/* RGMIICTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_EN BIT(1)
+#define DP83867_RGMII_RX_CLK_DELAY_EN BIT(0)
+
+/* PHY CTRL bits */
+#define DP83867_PHYCR_FIFO_DEPTH_SHIFT 14
+
+/* RGMIIDCTL bits */
+#define DP83867_RGMII_TX_CLK_DELAY_SHIFT 4
+
+struct dp83867_private {
+ u32 rx_id_delay;
+ u32 tx_id_delay;
+ u32 fifo_depth;
+};
+
+static int dp83867_ack_interrupt(struct phy_device *phydev)
+{
+ int err = phy_read(phydev, MII_DP83867_ISR);
+
+ if (err < 0)
+ return err;
+
+ return 0;
+}
+
+static int dp83867_config_intr(struct phy_device *phydev)
+{
+ int micr_status;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
+ micr_status = phy_read(phydev, MII_DP83867_MICR);
+ if (micr_status < 0)
+ return micr_status;
+
+ micr_status |=
+ (MII_DP83867_MICR_AN_ERR_INT_EN |
+ MII_DP83867_MICR_SPEED_CHNG_INT_EN |
+ MII_DP83867_MICR_DUP_MODE_CHNG_INT_EN |
+ MII_DP83867_MICR_SLEEP_MODE_CHNG_INT_EN);
+
+ return phy_write(phydev, MII_DP83867_MICR, micr_status);
+ }
+
+ micr_status = 0x0;
+ return phy_write(phydev, MII_DP83867_MICR, micr_status);
+}
+
+#ifdef CONFIG_OF_MDIO
+static int dp83867_of_init(struct phy_device *phydev)
+{
+ struct dp83867_private *dp83867 = phydev->priv;
+ struct device *dev = &phydev->dev;
+ struct device_node *of_node = dev->of_node;
+ int ret;
+
+ if (!of_node && dev->parent->of_node)
+ of_node = dev->parent->of_node;
+
+ if (!of_node)
+ return -ENODEV;
+
+ ret = of_property_read_u32(of_node, "ti,rx_int_delay",
+ &dp83867->rx_id_delay);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "ti,tx_int_delay",
+ &dp83867->tx_id_delay);
+ if (ret)
+ return ret;
+
+ ret = of_property_read_u32(of_node, "ti,fifo_depth",
+ &dp83867->fifo_depth);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+#else
+static int dp83867_of_init(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF_MDIO */
+
+static int dp83867_config_init(struct phy_device *phydev)
+{
+ struct dp83867_private *dp83867;
+ int ret;
+ u16 val, delay;
+
+ if (!phydev->priv) {
+ dp83867 = devm_kzalloc(&phydev->dev, sizeof(*dp83867),
+ GFP_KERNEL);
+ if (!dp83867)
+ return -ENOMEM;
+
+ phydev->priv = dp83867;
+ ret = dp83867_of_init(phydev);
+ if (ret)
+ return ret;
+ } else {
+ dp83867 = (struct dp83867_private *)phydev->priv;
+ }
+
+ if (phy_interface_is_rgmii(phydev)) {
+ ret = phy_write(phydev, MII_DP83867_PHYCTRL,
+ (dp83867->fifo_depth << DP83867_PHYCR_FIFO_DEPTH_SHIFT));
+ if (ret)
+ return ret;
+ }
+
+ if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
+ (phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID)) {
+ val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
+ DP83867_DEVADDR, phydev->addr);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID)
+ val |= (DP83867_RGMII_TX_CLK_DELAY_EN | DP83867_RGMII_RX_CLK_DELAY_EN);
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ val |= DP83867_RGMII_TX_CLK_DELAY_EN;
+
+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ val |= DP83867_RGMII_RX_CLK_DELAY_EN;
+
+ phy_write_mmd_indirect(phydev, DP83867_RGMIICTL,
+ DP83867_DEVADDR, phydev->addr, val);
+
+ delay = (dp83867->rx_id_delay |
+ (dp83867->tx_id_delay << DP83867_RGMII_TX_CLK_DELAY_SHIFT));
+
+ phy_write_mmd_indirect(phydev, DP83867_RGMIIDCTL,
+ DP83867_DEVADDR, phydev->addr, delay);
+ }
+
+ return 0;
+}
+
+static int dp83867_phy_reset(struct phy_device *phydev)
+{
+ int err;
+
+ err = phy_write(phydev, DP83867_CTRL, DP83867_SW_RESET);
+ if (err < 0)
+ return err;
+
+ return dp83867_config_init(phydev);
+}
+
+static struct phy_driver dp83867_driver[] = {
+ {
+ .phy_id = DP83867_PHY_ID,
+ .phy_id_mask = 0xfffffff0,
+ .name = "TI DP83867",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_INTERRUPT,
+
+ .config_init = dp83867_config_init,
+ .soft_reset = dp83867_phy_reset,
+
+ /* IRQ related */
+ .ack_interrupt = dp83867_ack_interrupt,
+ .config_intr = dp83867_config_intr,
+
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+
+ .driver = {.owner = THIS_MODULE,}
+ },
+};
+module_phy_driver(dp83867_driver);
+
+static struct mdio_device_id __maybe_unused dp83867_tbl[] = {
+ { DP83867_PHY_ID, 0xfffffff0 },
+ { }
+};
+
+MODULE_DEVICE_TABLE(mdio, dp83867_tbl);
+
+MODULE_DESCRIPTION("Texas Instruments DP83867 PHY driver");
+MODULE_AUTHOR("Dan Murphy <dmurphy@ti.com");
+MODULE_LICENSE("GPL");
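The RGMII delay registers written above live in the vendor device at MMD
address 0x1f and are reached indirectly. A rough sketch of what a
phy_read_mmd_indirect() call expands to, assuming the standard clause-22
indirect access registers (MII_MMD_CTRL/MII_MMD_DATA from linux/mii.h); the
helper name is illustrative only:

	static int mmd_indirect_read_sketch(struct phy_device *phydev,
					    int devad, u16 reg)
	{
		/* select the MMD, control register in address mode */
		phy_write(phydev, MII_MMD_CTRL, devad);
		/* latch the target register number */
		phy_write(phydev, MII_MMD_DATA, reg);
		/* switch to data mode, no post-increment */
		phy_write(phydev, MII_MMD_CTRL, devad | MII_MMD_CTRL_NOINCR);
		/* the data register now proxies the extended register */
		return phy_read(phydev, MII_MMD_DATA);
	}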
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 8644f03..0dbc445 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -139,10 +139,7 @@ static int ip1001_config_init(struct phy_device *phydev)
if (c < 0)
return c;
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ if (phy_interface_is_rgmii(phydev)) {
c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS);
if (c < 0)
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 1b1698f..f721444 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -317,10 +317,7 @@ static int m88e1121_config_aneg(struct phy_device *phydev)
if (err < 0)
return err;
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ if (phy_interface_is_rgmii(phydev)) {
mscr = phy_read(phydev, MII_88E1121_PHY_MSCR_REG) &
MII_88E1121_PHY_MSCR_DELAY_MASK;
@@ -469,10 +466,7 @@ static int m88e1111_config_init(struct phy_device *phydev)
int err;
int temp;
- if ((phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
- (phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
+ if (phy_interface_is_rgmii(phydev)) {
temp = phy_read(phydev, MII_M1111_PHY_EXT_CR);
if (temp < 0)
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 377d2db..b2197b5 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -1093,8 +1093,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
if ((phydev->duplex == DUPLEX_FULL) &&
((phydev->interface == PHY_INTERFACE_MODE_MII) ||
(phydev->interface == PHY_INTERFACE_MODE_GMII) ||
- (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
- phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
+ phy_interface_is_rgmii(phydev) ||
phy_is_internal(phydev))) {
int eee_lp, eee_cap, eee_adv;
u32 lp, cap, adv;
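The three conversions above (icplus, marvell, phy.c) fold the open-coded
comparisons into one helper. A sketch of the consolidated check, assuming the
four RGMII variants are contiguous in the phy_interface_t enum (which is what
the replaced range comparison in phy_init_eee() already relied on):

	static inline bool phy_interface_is_rgmii(struct phy_device *phydev)
	{
		return phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
		       phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID;
	}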
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 5eddbc0..34c519e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2131,9 +2131,10 @@ static void vxlan_cleanup(unsigned long arg)
if (!netif_running(vxlan->dev))
return;
- spin_lock_bh(&vxlan->hash_lock);
for (h = 0; h < FDB_HASH_SIZE; ++h) {
struct hlist_node *p, *n;
+
+ spin_lock_bh(&vxlan->hash_lock);
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
@@ -2152,8 +2153,8 @@ static void vxlan_cleanup(unsigned long arg)
} else if (time_before(timeout, next_timer))
next_timer = timeout;
}
+ spin_unlock_bh(&vxlan->hash_lock);
}
- spin_unlock_bh(&vxlan->hash_lock);
mod_timer(&vxlan->age_timer, next_timer);
}
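Taking the lock per bucket bounds how long bottom halves stay disabled on
each timer pass; the aging walk now follows the usual per-bucket pattern,
roughly (sketch, with the entry handling elided):

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		spin_lock_bh(&vxlan->hash_lock);
		/* walk bucket h, drop or re-arm expired FDB entries */
		spin_unlock_bh(&vxlan->hash_lock);
		/* other hash_lock users can run between buckets */
	}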
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index cce4625..a511ef3 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -889,7 +889,7 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
GFP_KERNEL);
} else if (vif->sme_state == SME_CONNECTED) {
cfg80211_disconnected(vif->ndev, proto_reason,
- NULL, 0, GFP_KERNEL);
+ NULL, 0, false, GFP_KERNEL);
}
vif->sme_state = SME_DISCONNECTED;
@@ -3467,7 +3467,7 @@ void ath6kl_cfg80211_stop(struct ath6kl_vif *vif)
GFP_KERNEL);
break;
case SME_CONNECTED:
- cfg80211_disconnected(vif->ndev, 0, NULL, 0, GFP_KERNEL);
+ cfg80211_disconnected(vif->ndev, 0, NULL, 0, true, GFP_KERNEL);
break;
}
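This hunk and the remaining wireless hunks below all track the same cfg80211
API change; as the converted call sites imply, the prototype gains a flag
telling the stack whether the disconnect originated locally rather than from
the AP:

	void cfg80211_disconnected(struct net_device *dev, u16 reason,
				   const u8 *ie, size_t ie_len,
				   bool locally_generated, gfp_t gfp);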
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index ef3b6bc..6d704ae 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -227,7 +227,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
if (test_bit(wil_status_fwconnected, wil->status)) {
clear_bit(wil_status_fwconnected, wil->status);
cfg80211_disconnected(ndev, reason_code,
- NULL, 0, GFP_KERNEL);
+ NULL, 0, false, GFP_KERNEL);
} else if (test_bit(wil_status_fwconnecting, wil->status)) {
cfg80211_connect_result(ndev, bssid, NULL, 0, NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
index 6fe2b75..e10fa67 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/cfg80211.c
@@ -1296,7 +1296,7 @@ static void brcmf_link_down(struct brcmf_cfg80211_vif *vif, u16 reason)
}
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &vif->sme_state);
cfg80211_disconnected(vif->wdev.netdev, reason, NULL, 0,
- GFP_KERNEL);
+ true, GFP_KERNEL);
}
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &vif->sme_state);
@@ -1962,7 +1962,7 @@ brcmf_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *ndev,
clear_bit(BRCMF_VIF_STATUS_CONNECTED, &ifp->vif->sme_state);
clear_bit(BRCMF_VIF_STATUS_CONNECTING, &ifp->vif->sme_state);
- cfg80211_disconnected(ndev, reason_code, NULL, 0, GFP_KERNEL);
+ cfg80211_disconnected(ndev, reason_code, NULL, 0, true, GFP_KERNEL);
memcpy(&scbval.ea, &profile->bssid, ETH_ALEN);
scbval.val = cpu_to_le32(reason_code);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
index a932e45..1b47de0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
@@ -500,11 +500,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
msgbuf->rx_pktids,
msgbuf->ioctl_resp_pktid);
if (msgbuf->ioctl_resp_ret_len != 0) {
- if (!skb) {
- brcmf_err("Invalid packet id idx recv'd %d\n",
- msgbuf->ioctl_resp_pktid);
+ if (!skb)
return -EBADF;
- }
+
memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
len : msgbuf->ioctl_resp_ret_len);
}
@@ -866,10 +864,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
msgbuf->tx_pktids, idx);
- if (!skb) {
- brcmf_err("Invalid packet id idx recv'd %d\n", idx);
+ if (!skb)
return;
- }
set_bit(flowid, msgbuf->txstatus_done_map);
commonring = msgbuf->flowrings[flowid];
@@ -1148,6 +1144,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
msgbuf->rx_pktids, idx);
+ if (!skb)
+ return;
if (data_offset)
skb_pull(skb, data_offset);
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index efe3cf3..aba0957 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -21,8 +21,8 @@ config IWLWIFI
Intel 7260 Wi-Fi Adapter
Intel 3160 Wi-Fi Adapter
Intel 7265 Wi-Fi Adapter
- Intel 3165 Wi-Fi Adapter
Intel 8260 Wi-Fi Adapter
+ Intel 3165 Wi-Fi Adapter
This driver uses the kernel's mac80211 subsystem.
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 830dfec..80fefe7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -471,7 +471,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
return le16_to_cpup(nvm_sw + RADIO_CFG);
- return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
+ return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000));
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 9a3dae6..31f72a6 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -1,7 +1,7 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -355,7 +355,7 @@ struct iwl_trans_pcie {
/*protect hw register */
spinlock_t reg_lock;
- bool cmd_in_flight;
+ bool cmd_hold_nic_awake;
bool ref_cmd_in_flight;
/* protect ref counter */
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index dd1b90b..43ae658 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -1387,7 +1387,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
- if (trans_pcie->cmd_in_flight)
+ if (trans_pcie->cmd_hold_nic_awake)
goto out;
/* this bit wakes up the NIC */
@@ -1453,7 +1453,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
*/
__acquire(&trans_pcie->reg_lock);
- if (trans_pcie->cmd_in_flight)
+ if (trans_pcie->cmd_hold_nic_awake)
goto out;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 2b06f99..2b86c21 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1039,18 +1039,14 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
iwl_trans_pcie_ref(trans);
}
- if (trans_pcie->cmd_in_flight)
- return 0;
-
- trans_pcie->cmd_in_flight = true;
-
/*
* wake up the NIC to make sure that the firmware will see the host
* command - we will let the NIC sleep once all the host commands
* returned. This needs to be done only on NICs that have
* apmg_wake_up_wa set.
*/
- if (trans->cfg->base_params->apmg_wake_up_wa) {
+ if (trans->cfg->base_params->apmg_wake_up_wa &&
+ !trans_pcie->cmd_hold_nic_awake) {
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -1062,10 +1058,10 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
if (ret < 0) {
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
- trans_pcie->cmd_in_flight = false;
IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
return -EIO;
}
+ trans_pcie->cmd_hold_nic_awake = true;
}
return 0;
@@ -1083,15 +1079,14 @@ static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
iwl_trans_pcie_unref(trans);
}
- if (WARN_ON(!trans_pcie->cmd_in_flight))
- return 0;
-
- trans_pcie->cmd_in_flight = false;
+ if (trans->cfg->base_params->apmg_wake_up_wa) {
+ if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+ return 0;
- if (trans->cfg->base_params->apmg_wake_up_wa)
+ trans_pcie->cmd_hold_nic_awake = false;
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
- CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ }
return 0;
}
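The rename encodes an invariant: cmd_hold_nic_awake is only set once the wake
handshake actually succeeded, and only cleared together with dropping the MAC
access request. The shape of that handshake, as a sketch (the poll helper,
mask, and timeout are illustrative, not taken from this diff):

	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN, 15000);
	if (ret < 0) {
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		return -EIO;			/* flag stays false */
	}
	trans_pcie->cmd_hold_nic_awake = true;	/* set only on success */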
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 1a4d558..8317afd 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -835,14 +835,13 @@ static int lbs_cfg_scan(struct wiphy *wiphy,
* Events
*/
-void lbs_send_disconnect_notification(struct lbs_private *priv)
+void lbs_send_disconnect_notification(struct lbs_private *priv,
+ bool locally_generated)
{
lbs_deb_enter(LBS_DEB_CFG80211);
- cfg80211_disconnected(priv->dev,
- 0,
- NULL, 0,
- GFP_KERNEL);
+ cfg80211_disconnected(priv->dev, 0, NULL, 0, locally_generated,
+ GFP_KERNEL);
lbs_deb_leave(LBS_DEB_CFG80211);
}
@@ -1458,7 +1457,7 @@ int lbs_disconnect(struct lbs_private *priv, u16 reason)
cfg80211_disconnected(priv->dev,
reason,
- NULL, 0,
+ NULL, 0, true,
GFP_KERNEL);
priv->connect_status = LBS_DISCONNECTED;
@@ -2031,7 +2030,7 @@ static int lbs_leave_ibss(struct wiphy *wiphy, struct net_device *dev)
ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_STOP, &cmd);
/* TODO: consider doing this at MACREG_INT_CODE_ADHOC_BCN_LOST time */
- lbs_mac_event_disconnected(priv);
+ lbs_mac_event_disconnected(priv, true);
lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
return ret;
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
index 10995f5..acccc29 100644
--- a/drivers/net/wireless/libertas/cfg.h
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -10,7 +10,8 @@ struct wireless_dev *lbs_cfg_alloc(struct device *dev);
int lbs_cfg_register(struct lbs_private *priv);
void lbs_cfg_free(struct lbs_private *priv);
-void lbs_send_disconnect_notification(struct lbs_private *priv);
+void lbs_send_disconnect_notification(struct lbs_private *priv,
+ bool locally_generated);
void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
void lbs_scan_done(struct lbs_private *priv);
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 4279e8a..0c5444b 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -68,7 +68,8 @@ int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
/* From cmdresp.c */
-void lbs_mac_event_disconnected(struct lbs_private *priv);
+void lbs_mac_event_disconnected(struct lbs_private *priv,
+ bool locally_generated);
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 65f18f1..e5442e8 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -19,10 +19,13 @@
* reset link state etc.
*
* @priv: A pointer to struct lbs_private structure
+ * @locally_generated: indicates disconnect was requested locally
+ * (usually by userspace)
*
* returns: n/a
*/
-void lbs_mac_event_disconnected(struct lbs_private *priv)
+void lbs_mac_event_disconnected(struct lbs_private *priv,
+ bool locally_generated)
{
if (priv->connect_status != LBS_CONNECTED)
return;
@@ -36,7 +39,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
msleep_interruptible(1000);
if (priv->wdev->iftype == NL80211_IFTYPE_STATION)
- lbs_send_disconnect_notification(priv);
+ lbs_send_disconnect_notification(priv, locally_generated);
/* report disconnect to upper layer */
netif_stop_queue(priv->dev);
@@ -229,17 +232,17 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
case MACREG_INT_CODE_DEAUTHENTICATED:
lbs_deb_cmd("EVENT: deauthenticated\n");
- lbs_mac_event_disconnected(priv);
+ lbs_mac_event_disconnected(priv, false);
break;
case MACREG_INT_CODE_DISASSOCIATED:
lbs_deb_cmd("EVENT: disassociated\n");
- lbs_mac_event_disconnected(priv);
+ lbs_mac_event_disconnected(priv, false);
break;
case MACREG_INT_CODE_LINK_LOST_NO_SCAN:
lbs_deb_cmd("EVENT: link lost\n");
- lbs_mac_event_disconnected(priv);
+ lbs_mac_event_disconnected(priv, true);
break;
case MACREG_INT_CODE_PS_SLEEP:
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 6208ef1..cce8e39 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -1449,7 +1449,7 @@ int mwifiex_deauthenticate(struct mwifiex_private *priv, u8 *mac)
ret = mwifiex_deauthenticate_infra(priv, mac);
if (ret)
cfg80211_disconnected(priv->netdev, 0, NULL, 0,
- GFP_KERNEL);
+ true, GFP_KERNEL);
break;
case NL80211_IFTYPE_ADHOC:
return mwifiex_send_cmd(priv, HostCmd_CMD_802_11_AD_HOC_STOP,
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index bed67d4..9520378 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -136,7 +136,7 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
priv->bss_mode == NL80211_IFTYPE_P2P_CLIENT) {
cfg80211_disconnected(priv->netdev, reason_code, NULL, 0,
- GFP_KERNEL);
+ false, GFP_KERNEL);
}
eth_zero_addr(priv->cfg_bssid);
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 96175a7..71a825c 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2861,7 +2861,7 @@ static void rndis_wlan_do_link_down_work(struct usbnet *usbdev)
deauthenticate(usbdev);
- cfg80211_disconnected(usbdev->net, 0, NULL, 0, GFP_KERNEL);
+ cfg80211_disconnected(usbdev->net, 0, NULL, 0, true, GFP_KERNEL);
}
netif_carrier_off(usbdev->net);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 4de46aa..f1b2c17 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -52,7 +52,7 @@
* event channels are limited resource. Split event channels are
* enabled by default.
*/
-bool separate_tx_rx_irq = 1;
+bool separate_tx_rx_irq = true;
module_param(separate_tx_rx_irq, bool, 0644);
/* The time that packets can stay on the guest Rx internal queue
@@ -1250,7 +1250,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
netdev_err(queue->vif->dev,
"txreq.offset: %x, size: %u, end: %lu\n",
txreq.offset, txreq.size,
- (txreq.offset&~PAGE_MASK) + txreq.size);
+ (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
xenvif_fatal_tx_err(queue->vif);
break;
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 3d8dbf5..968787a 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -34,6 +34,8 @@ struct backend_info {
enum xenbus_state frontend_state;
struct xenbus_watch hotplug_status_watch;
u8 have_hotplug_status_watch:1;
+
+ const char *hotplug_script;
};
static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
@@ -238,6 +240,7 @@ static int netback_remove(struct xenbus_device *dev)
xenvif_free(be->vif);
be->vif = NULL;
}
+ kfree(be->hotplug_script);
kfree(be);
dev_set_drvdata(&dev->dev, NULL);
return 0;
@@ -255,6 +258,7 @@ static int netback_probe(struct xenbus_device *dev,
struct xenbus_transaction xbt;
int err;
int sg;
+ const char *script;
struct backend_info *be = kzalloc(sizeof(struct backend_info),
GFP_KERNEL);
if (!be) {
@@ -347,6 +351,15 @@ static int netback_probe(struct xenbus_device *dev,
if (err)
pr_debug("Error writing multi-queue-max-queues\n");
+ script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
+ if (IS_ERR(script)) {
+ err = PTR_ERR(script);
+ xenbus_dev_fatal(dev, err, "reading script");
+ goto fail;
+ }
+
+ be->hotplug_script = script;
+
err = xenbus_switch_state(dev, XenbusStateInitWait);
if (err)
goto fail;
@@ -379,22 +392,14 @@ static int netback_uevent(struct xenbus_device *xdev,
struct kobj_uevent_env *env)
{
struct backend_info *be = dev_get_drvdata(&xdev->dev);
- char *val;
- val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
- if (IS_ERR(val)) {
- int err = PTR_ERR(val);
- xenbus_dev_fatal(xdev, err, "reading script");
- return err;
- } else {
- if (add_uevent_var(env, "script=%s", val)) {
- kfree(val);
- return -ENOMEM;
- }
- kfree(val);
- }
+ if (!be)
+ return 0;
- if (!be || !be->vif)
+ if (add_uevent_var(env, "script=%s", be->hotplug_script))
+ return -ENOMEM;
+
+ if (!be->vif)
return 0;
return add_uevent_var(env, "vif=%s", be->vif->dev->name);
@@ -793,6 +798,7 @@ static void connect(struct backend_info *be)
goto err;
}
+ queue->credit_bytes = credit_bytes;
queue->remaining_credit = credit_bytes;
queue->credit_usec = credit_usec;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 3f45afd4..c89ca26 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1560,9 +1560,8 @@ static int xennet_init_queue(struct netfront_queue *queue)
spin_lock_init(&queue->tx_lock);
spin_lock_init(&queue->rx_lock);
- init_timer(&queue->rx_refill_timer);
- queue->rx_refill_timer.data = (unsigned long)queue;
- queue->rx_refill_timer.function = rx_refill_timeout;
+ setup_timer(&queue->rx_refill_timer, rx_refill_timeout,
+ (unsigned long)queue);
snprintf(queue->name, sizeof(queue->name), "%s-q%u",
queue->info->netdev->name, queue->id);
@@ -1698,6 +1697,7 @@ static void xennet_destroy_queues(struct netfront_info *info)
if (netif_running(info->netdev))
napi_disable(&queue->napi);
+ del_timer_sync(&queue->rx_refill_timer);
netif_napi_del(&queue->napi);
}
@@ -2102,9 +2102,6 @@ static const struct attribute_group xennet_dev_group = {
static int xennet_remove(struct xenbus_device *dev)
{
struct netfront_info *info = dev_get_drvdata(&dev->dev);
- unsigned int num_queues = info->netdev->real_num_tx_queues;
- struct netfront_queue *queue = NULL;
- unsigned int i = 0;
dev_dbg(&dev->dev, "%s\n", dev->nodename);
@@ -2112,16 +2109,7 @@ static int xennet_remove(struct xenbus_device *dev)
unregister_netdev(info->netdev);
- for (i = 0; i < num_queues; ++i) {
- queue = &info->queues[i];
- del_timer_sync(&queue->rx_refill_timer);
- }
-
- if (num_queues) {
- kfree(info->queues);
- info->queues = NULL;
- }
-
+ xennet_destroy_queues(info);
xennet_free_netdev(info->netdev);
return 0;
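setup_timer() used in the first xen-netfront hunk is the one-call form of the
init_timer() sequence it replaces; roughly equivalent to this sketch (the
real kernel version is a macro, the inline here is for illustration):

	static inline void setup_timer_sketch(struct timer_list *timer,
					      void (*fn)(unsigned long),
					      unsigned long data)
	{
		init_timer(timer);
		timer->function = fn;
		timer->data = data;
	}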