author     Linus Torvalds <torvalds@linux-foundation.org>    2014-06-12 14:27:40 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>    2014-06-12 14:27:40 -0700
commit     f9da455b93f6ba076935b4ef4589f61e529ae046 (patch)
tree       3c4e69ce1ba1d6bf65915b97a76ca2172105b278 /drivers/net/ethernet
parent     0e04c641b199435f3779454055f6a7de258ecdfc (diff)
parent     e5eca6d41f53db48edd8cf88a3f59d2c30227f8e (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:

 1) Seccomp BPF filters can now be JIT'd, from Alexei Starovoitov.
 2) Multiqueue support in xen-netback and xen-netfront, from Andrew J Benniston.
 3) Allow tweaking of aggregation settings in cdc_ncm driver, from Bjørn Mork.
 4) BPF now has a "random" opcode, from Chema Gonzalez.
 5) Add more BPF documentation and improve test framework, from Daniel Borkmann.
 6) Support TCP fastopen over ipv6, from Daniel Lee.
 7) Add software TSO helper functions and use them to support software TSO in mvneta and mv643xx_eth drivers. From Ezequiel Garcia.
 8) Support software TSO in fec driver too, from Nimrod Andy.
 9) Add Broadcom SYSTEMPORT driver, from Florian Fainelli.
10) Handle broadcasts more gracefully over macvlan when there are large numbers of interfaces configured, from Herbert Xu.
11) Allow more control over fwmark used for non-socket based responses, from Lorenzo Colitti.
12) Do TCP congestion window limiting based upon measurements, from Neal Cardwell.
13) Support busy polling in SCTP, from Neal Horman.
14) Allow RSS key to be configured via ethtool, from Venkata Duvvuru.
15) Bridge promisc mode handling improvements from Vlad Yasevich.
16) Don't use inetpeer entries to implement ID generation any more, it performs poorly, from Eric Dumazet.

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1522 commits)
  rtnetlink: fix userspace API breakage for iproute2 < v3.9.0
  tcp: fixing TLP's FIN recovery
  net: fec: Add software TSO support
  net: fec: Add Scatter/gather support
  net: fec: Increase buffer descriptor entry number
  net: fec: Factorize feature setting
  net: fec: Enable IP header hardware checksum
  net: fec: Factorize the .xmit transmit function
  bridge: fix compile error when compiling without IPv6 support
  bridge: fix smatch warning / potential null pointer dereference
  via-rhine: fix full-duplex with autoneg disable
  bnx2x: Enlarge the dorq threshold for VFs
  bnx2x: Check for UNDI in uncommon branch
  bnx2x: Fix 1G-baseT link
  bnx2x: Fix link for KR with swapped polarity lane
  sctp: Fix sk_ack_backlog wrap-around problem
  net/core: Add VF link state control policy
  net/fsl: xgmac_mdio is dependent on OF_MDIO
  net/fsl: Make xgmac_mdio read error message useful
  net_sched: drr: warn when qdisc is not work conserving
  ...
Diffstat (limited to 'drivers/net/ethernet')
-rw-r--r-- drivers/net/ethernet/3com/3c509.c | 2
-rw-r--r-- drivers/net/ethernet/3com/3c589_cs.c | 2
-rw-r--r-- drivers/net/ethernet/3com/typhoon.c | 2
-rw-r--r-- drivers/net/ethernet/8390/ax88796.c | 4
-rw-r--r-- drivers/net/ethernet/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/Makefile | 1
-rw-r--r-- drivers/net/ethernet/adaptec/starfire.c | 2
-rw-r--r-- drivers/net/ethernet/alteon/acenic.c | 2
-rw-r--r-- drivers/net/ethernet/altera/altera_sgdma.c | 1
-rw-r--r-- drivers/net/ethernet/altera/altera_tse_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/amd/Kconfig | 14
-rw-r--r-- drivers/net/ethernet/amd/Makefile | 1
-rw-r--r-- drivers/net/ethernet/amd/amd8111e.c | 2
-rw-r--r-- drivers/net/ethernet/amd/ariadne.c | 3
-rw-r--r-- drivers/net/ethernet/amd/au1000_eth.c | 2
-rw-r--r-- drivers/net/ethernet/amd/hplance.c | 4
-rw-r--r-- drivers/net/ethernet/amd/mvme147.c | 6
-rw-r--r-- drivers/net/ethernet/amd/nmclan_cs.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/Makefile | 6
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 1007
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 375
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-desc.c | 556
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-dev.c | 2182
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 1351
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c | 510
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-main.c | 512
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 433
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe.h | 676
-rw-r--r-- drivers/net/ethernet/arc/emac_main.c | 49
-rw-r--r-- drivers/net/ethernet/atheros/alx/main.c | 2
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl1.c | 4
-rw-r--r-- drivers/net/ethernet/atheros/atlx/atl2.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/broadcom/Makefile | 1
-rw-r--r-- drivers/net/ethernet/broadcom/b44.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bcm63xx_enet.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 1654
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.h | 678
-rw-r--r-- drivers/net/ethernet/broadcom/bgmac.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2.c | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c | 14
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c | 23
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c | 36
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c | 49
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c | 15
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h | 8
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h | 2
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c | 6
-rw-r--r-- drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h | 4
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 29
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 2
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 21
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 104
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.h | 2
-rw-r--r-- drivers/net/ethernet/brocade/bna/bnad_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/calxeda/xgmac.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb/cxgb2.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c | 6
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4.h | 14
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 263
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h | 7
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 11
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.h | 1
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_msg.h | 10
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 7
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic.h | 32
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_dev.c | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_dev.h | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_ethtool.c | 67
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 323
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_cq.h | 9
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_dev.c | 4
-rw-r--r-- drivers/net/ethernet/cisco/enic/vnic_dev.h | 4
-rw-r--r-- drivers/net/ethernet/davicom/dm9000.c | 66
-rw-r--r-- drivers/net/ethernet/dec/tulip/tulip_core.c | 2
-rw-r--r-- drivers/net/ethernet/dec/tulip/uli526x.c | 4
-rw-r--r-- drivers/net/ethernet/dlink/dl2k.c | 6
-rw-r--r-- drivers/net/ethernet/dlink/sundance.c | 2
-rw-r--r-- drivers/net/ethernet/ec_bhf.c | 20
-rw-r--r-- drivers/net/ethernet/emulex/benet/be.h | 15
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.c | 610
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_cmds.h | 85
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_ethtool.c | 194
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_hw.h | 12
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 581
-rw-r--r-- drivers/net/ethernet/ethoc.c | 6
-rw-r--r-- drivers/net/ethernet/faraday/ftgmac100.c | 2
-rw-r--r-- drivers/net/ethernet/faraday/ftmac100.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/Kconfig | 1
-rw-r--r-- drivers/net/ethernet/freescale/fec.h | 13
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 661
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c | 17
-rw-r--r-- drivers/net/ethernet/freescale/gianfar.c | 22
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth.c | 14
-rw-r--r-- drivers/net/ethernet/freescale/ucc_geth_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/freescale/xgmac_mdio.c | 4
-rw-r--r-- drivers/net/ethernet/fujitsu/fmvj18x_cs.c | 2
-rw-r--r-- drivers/net/ethernet/hisilicon/Kconfig | 27
-rw-r--r-- drivers/net/ethernet/hisilicon/Makefile | 5
-rw-r--r-- drivers/net/ethernet/hisilicon/hix5hd2_gmac.c | 1066
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_main.c | 5
-rw-r--r-- drivers/net/ethernet/ibm/ehea/ehea_qmr.c | 4
-rw-r--r-- drivers/net/ethernet/ibm/emac/core.c | 2
-rw-r--r-- drivers/net/ethernet/icplus/ipg.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e100.c | 2
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_hw.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 5
-rw-r--r-- drivers/net/ethernet/intel/e1000e/80003es2lan.c | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/82571.c | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/e1000.h | 41
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ethtool.c | 17
-rw-r--r-- drivers/net/ethernet/intel/e1000e/hw.h | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000e/ich8lan.c | 62
-rw-r--r-- drivers/net/ethernet/intel/e1000e/mac.c | 9
-rw-r--r-- drivers/net/ethernet/intel/e1000e/mac.h | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000e/netdev.c | 88
-rw-r--r-- drivers/net/ethernet/intel/e1000e/nvm.c | 1
-rw-r--r-- drivers/net/ethernet/intel/e1000e/param.c | 4
-rw-r--r-- drivers/net/ethernet/intel/e1000e/phy.c | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e.h | 35
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.c | 60
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 137
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_common.c | 141
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c | 6
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 21
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_diag.c | 50
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ethtool.c | 311
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_hmc.c | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_hmc.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h | 1
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 1124
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_prototype.h | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_ptp.c | 37
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_register.h | 12
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.c | 131
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_txrx.h | 18
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_type.h | 53
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl.h | 4
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 209
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/Makefile | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.c | 61
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h | 164
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_alloc.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_common.c | 13
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_hmc.h | 12
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h | 6
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_osdep.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_prototype.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_register.h | 17
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_status.h | 5
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.c | 84
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_txrx.h | 23
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_type.h | 67
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h | 9
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf.h | 7
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c | 423
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_main.c | 185
-rw-r--r-- drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c | 62
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.c | 121
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_82575.h | 72
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_defines.h | 100
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_hw.h | 96
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_i210.c | 134
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_i210.h | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mac.c | 56
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mac.h | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mbx.c | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_mbx.h | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_nvm.c | 48
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_nvm.h | 49
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_phy.c | 66
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_phy.h | 48
-rw-r--r-- drivers/net/ethernet/intel/igb/e1000_regs.h | 58
-rw-r--r-- drivers/net/ethernet/intel/igb/igb.h | 49
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ethtool.c | 151
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_hwmon.c | 47
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 203
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_ptp.c | 60
-rw-r--r-- drivers/net/ethernet/intel/igbvf/ethtool.c | 7
-rw-r--r-- drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe.h | 21
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 89
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 85
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 80
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 31
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 24
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 28
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 3
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 96
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 2
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 356
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 10
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 68
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 32
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 164
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 66
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 5
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 62
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 15
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 376
-rw-r--r-- drivers/net/ethernet/marvell/mvmdio.c | 18
-rw-r--r-- drivers/net/ethernet/marvell/mvneta.c | 324
-rw-r--r-- drivers/net/ethernet/marvell/pxa168_eth.c | 2
-rw-r--r-- drivers/net/ethernet/marvell/sky2.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cmd.c | 109
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/cq.c | 3
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_cq.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_ethtool.c | 25
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_main.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 57
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_rx.c | 28
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 147
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/fw.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/main.c | 278
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mcg.c | 18
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4.h | 17
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mlx4_en.h | 41
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mr.c | 16
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/port.c | 12
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/profile.c | 13
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/qp.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/reset.c | 24
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 124
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/cmd.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 9
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 14
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h | 30
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/mr.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c | 15
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/qp.c | 4
-rw-r--r-- drivers/net/ethernet/micrel/ks8695net.c | 6
-rw-r--r-- drivers/net/ethernet/micrel/ks8851.c | 87
-rw-r--r-- drivers/net/ethernet/micrel/ksz884x.c | 5
-rw-r--r-- drivers/net/ethernet/microchip/enc28j60.c | 2
-rw-r--r-- drivers/net/ethernet/myricom/myri10ge/myri10ge.c | 2
-rw-r--r-- drivers/net/ethernet/natsemi/natsemi.c | 2
-rw-r--r-- drivers/net/ethernet/natsemi/ns83820.c | 2
-rw-r--r-- drivers/net/ethernet/neterion/s2io.c | 15
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-config.c | 22
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-ethtool.c | 6
-rw-r--r-- drivers/net/ethernet/neterion/vxge/vxge-main.c | 4
-rw-r--r-- drivers/net/ethernet/nvidia/forcedeth.c | 16
-rw-r--r-- drivers/net/ethernet/nxp/lpc_eth.c | 8
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/packetengines/hamachi.c | 6
-rw-r--r-- drivers/net/ethernet/packetengines/yellowfin.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/Kconfig | 11
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qla3xxx.c | 2
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 36
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c | 44
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h | 5
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c | 14
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c | 13
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c | 9
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c | 42
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c | 31
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 92
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c | 69
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h | 6
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c | 192
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c | 171
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c | 68
-rw-r--r-- drivers/net/ethernet/qlogic/qlge/qlge_main.c | 4
-rw-r--r-- drivers/net/ethernet/realtek/r8169.c | 2
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 47
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 2
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c | 2
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c | 50
-rw-r--r-- drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h | 1
-rw-r--r-- drivers/net/ethernet/sfc/efx.c | 2
-rw-r--r-- drivers/net/ethernet/sfc/ethtool.c | 10
-rw-r--r-- drivers/net/ethernet/sfc/io.h | 7
-rw-r--r-- drivers/net/ethernet/sfc/siena_sriov.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 22
-rw-r--r-- drivers/net/ethernet/sis/sis190.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smc91c92_cs.c | 2
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 2
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 10
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 26
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 17
-rw-r--r-- drivers/net/ethernet/tehuti/tehuti.c | 2
-rw-r--r-- drivers/net/ethernet/ti/cpmac.c | 14
-rw-r--r-- drivers/net/ethernet/ti/cpsw-phy-sel.c | 62
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 114
-rw-r--r-- drivers/net/ethernet/ti/cpts.c | 11
-rw-r--r-- drivers/net/ethernet/ti/davinci_cpdma.c | 39
-rw-r--r-- drivers/net/ethernet/ti/davinci_emac.c | 9
-rw-r--r-- drivers/net/ethernet/ti/davinci_mdio.c | 50
-rw-r--r-- drivers/net/ethernet/tile/tilegx.c | 13
-rw-r--r-- drivers/net/ethernet/toshiba/ps3_gelic_net.c | 2
-rw-r--r-- drivers/net/ethernet/via/Kconfig | 2
-rw-r--r-- drivers/net/ethernet/via/via-rhine.c | 511
-rw-r--r-- drivers/net/ethernet/xilinx/ll_temac_main.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c | 2
-rw-r--r-- drivers/net/ethernet/xilinx/xilinx_emaclite.c | 2
326 files changed, 19853 insertions, 5292 deletions
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c
index 35df0b9..a968654 100644
--- a/drivers/net/ethernet/3com/3c509.c
+++ b/drivers/net/ethernet/3com/3c509.c
@@ -534,7 +534,7 @@ static int el3_common_init(struct net_device *dev)
/* The EL3-specific entries in the device structure. */
dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
err = register_netdev(dev);
if (err) {
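
A recurring change across this pull is replacing the SET_ETHTOOL_OPS() macro with a direct assignment to dev->ethtool_ops. The macro was a one-line wrapper in include/linux/ethtool.h, so the two spellings are equivalent; a minimal sketch of the conversion seen in the hunk above and in the drivers below:

    /* SET_ETHTOOL_OPS() expanded to a plain assignment: */
    #define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))

    /* hence old and new forms do exactly the same thing: */
    SET_ETHTOOL_OPS(dev, &ethtool_ops);  /* old */
    dev->ethtool_ops = &ethtool_ops;     /* new */

Open-coding the assignment lets the wrapper be removed from the ethtool API.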
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 063557e..f18647c 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -218,7 +218,7 @@ static int tc589_probe(struct pcmcia_device *link)
dev->netdev_ops = &el3_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
return tc589_config(link);
}
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 465cc71..e13b046 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -2435,7 +2435,7 @@ typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(dev, &tp->napi, typhoon_poll, 16);
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &typhoon_ethtool_ops);
+ dev->ethtool_ops = &typhoon_ethtool_ops;
/* We can handle scatter gather, up to 16 entries, and
* we can do IP checksumming (only version 4, doh...)
diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
index 455d4c3..1d162cc 100644
--- a/drivers/net/ethernet/8390/ax88796.c
+++ b/drivers/net/ethernet/8390/ax88796.c
@@ -157,7 +157,7 @@ static void ax_reset_8390(struct net_device *dev)
/* This check _should_not_ be necessary, omit eventually. */
while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) {
- if (jiffies - reset_start_time > 2 * HZ / 100) {
+ if (time_after(jiffies, reset_start_time + 2 * HZ / 100)) {
netdev_warn(dev, "%s: did not complete.\n", __func__);
break;
}
@@ -293,7 +293,7 @@ static void ax_block_output(struct net_device *dev, int count,
dma_start = jiffies;
while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) {
- if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */
+ if (time_after(jiffies, dma_start + 2 * HZ / 100)) { /* 20ms */
netdev_warn(dev, "timeout waiting for Tx RDC.\n");
ax_reset_8390(dev);
ax_NS8390_init(dev, 1);
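
The ax88796 hunks above swap open-coded jiffies arithmetic for time_after(), the idiomatic comparison helper from <linux/jiffies.h>. A minimal sketch of the pattern; poll_condition() here is a hypothetical stand-in for the ISR checks above:

    #include <linux/jiffies.h>

    unsigned long timeout = jiffies + 2 * HZ / 100;   /* 20 ms from now */

    /* time_after(a, b) is roughly ((long)((b) - (a)) < 0): it compares
     * two jiffies values via a signed difference, which is well defined
     * across counter wrap-around and reads as an explicit time check,
     * rather than as raw "jiffies - start > delta" arithmetic.
     */
    while (!poll_condition()) {
            if (time_after(jiffies, timeout))
                    break;                            /* timed out */
    }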
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 0513494..edb7186 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -68,6 +68,7 @@ source "drivers/net/ethernet/neterion/Kconfig"
source "drivers/net/ethernet/faraday/Kconfig"
source "drivers/net/ethernet/freescale/Kconfig"
source "drivers/net/ethernet/fujitsu/Kconfig"
+source "drivers/net/ethernet/hisilicon/Kconfig"
source "drivers/net/ethernet/hp/Kconfig"
source "drivers/net/ethernet/ibm/Kconfig"
source "drivers/net/ethernet/intel/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 35190e3..58de333 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_NET_VENDOR_EXAR) += neterion/
obj-$(CONFIG_NET_VENDOR_FARADAY) += faraday/
obj-$(CONFIG_NET_VENDOR_FREESCALE) += freescale/
obj-$(CONFIG_NET_VENDOR_FUJITSU) += fujitsu/
+obj-$(CONFIG_NET_VENDOR_HISILICON) += hisilicon/
obj-$(CONFIG_NET_VENDOR_HP) += hp/
obj-$(CONFIG_NET_VENDOR_IBM) += ibm/
obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c
index 171d73c..40dbbf7 100644
--- a/drivers/net/ethernet/adaptec/starfire.c
+++ b/drivers/net/ethernet/adaptec/starfire.c
@@ -784,7 +784,7 @@ static int starfire_init_one(struct pci_dev *pdev,
dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
netif_napi_add(dev, &np->napi, netdev_poll, max_interrupt_work);
diff --git a/drivers/net/ethernet/alteon/acenic.c b/drivers/net/ethernet/alteon/acenic.c
index 1517e9df..9a6991b 100644
--- a/drivers/net/ethernet/alteon/acenic.c
+++ b/drivers/net/ethernet/alteon/acenic.c
@@ -476,7 +476,7 @@ static int acenic_probe_one(struct pci_dev *pdev,
dev->watchdog_timeo = 5*HZ;
dev->netdev_ops = &ace_netdev_ops;
- SET_ETHTOOL_OPS(dev, &ace_ethtool_ops);
+ dev->ethtool_ops = &ace_ethtool_ops;
/* we only display this string ONCE */
if (!boards_found)
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 99cc56f..580553d 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -353,7 +353,6 @@ static int sgdma_async_read(struct altera_tse_private *priv)
struct sgdma_descrip __iomem *cdesc = &descbase[0];
struct sgdma_descrip __iomem *ndesc = &descbase[1];
-
struct tse_buffer *rxbuffer = NULL;
if (!sgdma_rxbusy(priv)) {
diff --git a/drivers/net/ethernet/altera/altera_tse_ethtool.c b/drivers/net/ethernet/altera/altera_tse_ethtool.c
index 54c25ef..be72e1e 100644
--- a/drivers/net/ethernet/altera/altera_tse_ethtool.c
+++ b/drivers/net/ethernet/altera/altera_tse_ethtool.c
@@ -271,5 +271,5 @@ static const struct ethtool_ops tse_ethtool_ops = {
void altera_tse_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &tse_ethtool_ops);
+ netdev->ethtool_ops = &tse_ethtool_ops;
}
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index 562df46..bbaf36d 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -7,7 +7,7 @@ config NET_VENDOR_AMD
default y
depends on DIO || MACH_DECSTATION || MVME147 || ATARI || SUN3 || \
SUN3X || SBUS || PCI || ZORRO || (ISA && ISA_DMA_API) || \
- (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA
+ (ARM && ARCH_EBSA110) || ISA || EISA || PCMCIA || ARM64
---help---
If you have a network (Ethernet) chipset belonging to this class,
say Y.
@@ -177,4 +177,16 @@ config SUNLANCE
To compile this driver as a module, choose M here: the module
will be called sunlance.
+config AMD_XGBE
+ tristate "AMD 10GbE Ethernet driver"
+ depends on OF_NET
+ select PHYLIB
+ select AMD_XGBE_PHY
+ ---help---
+ This driver supports the AMD 10GbE Ethernet device found on an
+ AMD SoC.
+
+ To compile this driver as a module, choose M here: the module
+ will be called amd-xgbe.
+
endif # NET_VENDOR_AMD
diff --git a/drivers/net/ethernet/amd/Makefile b/drivers/net/ethernet/amd/Makefile
index cdd4301..a38a2dc 100644
--- a/drivers/net/ethernet/amd/Makefile
+++ b/drivers/net/ethernet/amd/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_NI65) += ni65.o
obj-$(CONFIG_PCNET32) += pcnet32.o
obj-$(CONFIG_SUN3LANCE) += sun3lance.o
obj-$(CONFIG_SUNLANCE) += sunlance.o
+obj-$(CONFIG_AMD_XGBE) += xgbe/
diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c
index 26efaaa..068dc7c 100644
--- a/drivers/net/ethernet/amd/amd8111e.c
+++ b/drivers/net/ethernet/amd/amd8111e.c
@@ -1900,7 +1900,7 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
/* Initialize driver entry points */
dev->netdev_ops = &amd8111e_netdev_ops;
- SET_ETHTOOL_OPS(dev, &ops);
+ dev->ethtool_ops = &ops;
dev->irq =pdev->irq;
dev->watchdog_timeo = AMD8111E_TX_TIMEOUT;
netif_napi_add(dev, &lp->napi, amd8111e_rx_poll, 32);
diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c
index b08101b..968b7bf 100644
--- a/drivers/net/ethernet/amd/ariadne.c
+++ b/drivers/net/ethernet/amd/ariadne.c
@@ -718,7 +718,6 @@ static int ariadne_init_one(struct zorro_dev *z,
unsigned long mem_start = board + ARIADNE_RAM;
struct resource *r1, *r2;
struct net_device *dev;
- struct ariadne_private *priv;
u32 serial;
int err;
@@ -738,8 +737,6 @@ static int ariadne_init_one(struct zorro_dev *z,
return -ENOMEM;
}
- priv = netdev_priv(dev);
-
r1->name = dev->name;
r2->name = dev->name;
diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c
index a2bd91e..a78e4c1 100644
--- a/drivers/net/ethernet/amd/au1000_eth.c
+++ b/drivers/net/ethernet/amd/au1000_eth.c
@@ -1229,7 +1229,7 @@ static int au1000_probe(struct platform_device *pdev)
dev->base_addr = base->start;
dev->irq = irq;
dev->netdev_ops = &au1000_netdev_ops;
- SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
+ dev->ethtool_ops = &au1000_ethtool_ops;
dev->watchdog_timeo = ETH_TX_TIMEOUT;
/*
diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c
index 47ce57c..6c9de11 100644
--- a/drivers/net/ethernet/amd/hplance.c
+++ b/drivers/net/ethernet/amd/hplance.c
@@ -27,9 +27,9 @@
#include "hplance.h"
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 16392 bytes of RAM for the init block and buffers. This places
* an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
*/
#define LANCE_LOG_TX_BUFFERS 1
#define LANCE_LOG_RX_BUFFERS 3
diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c
index 0e8399d..0660ac5 100644
--- a/drivers/net/ethernet/amd/mvme147.c
+++ b/drivers/net/ethernet/amd/mvme147.c
@@ -26,9 +26,9 @@
#include <asm/pgtable.h>
#include <asm/mvme147hw.h>
-/* We have 16834 bytes of RAM for the init block and buffers. This places
+/* We have 32K of RAM for the init block and buffers. This places
* an upper limit on the number of buffers we can use. NetBSD uses 8 Rx
- * buffers and 2 Tx buffers.
+ * buffers and 2 Tx buffers, it takes (8 + 2) * 1544 bytes.
*/
#define LANCE_LOG_TX_BUFFERS 1
#define LANCE_LOG_RX_BUFFERS 3
@@ -111,7 +111,7 @@ struct net_device * __init mvme147lance_probe(int unit)
dev->dev_addr);
lp = netdev_priv(dev);
- lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 16K */
+ lp->ram = __get_dma_pages(GFP_ATOMIC, 3); /* 32K */
if (!lp->ram) {
printk("%s: No memory for LANCE buffers\n", dev->name);
free_netdev(dev);
diff --git a/drivers/net/ethernet/amd/nmclan_cs.c b/drivers/net/ethernet/amd/nmclan_cs.c
index 08569fe..abf3b15 100644
--- a/drivers/net/ethernet/amd/nmclan_cs.c
+++ b/drivers/net/ethernet/amd/nmclan_cs.c
@@ -457,7 +457,7 @@ static int nmclan_probe(struct pcmcia_device *link)
lp->tx_free_frames=AM2150_MAX_TX_FRAMES;
dev->netdev_ops = &mace_netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
return nmclan_config(link);
diff --git a/drivers/net/ethernet/amd/xgbe/Makefile b/drivers/net/ethernet/amd/xgbe/Makefile
new file mode 100644
index 0000000..26cf9af
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/Makefile
@@ -0,0 +1,6 @@
+obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
+
+amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
+ xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o
+
+amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
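
Nearly all of the new xgbe-common.h that follows is register layout: each hardware field is described by an _INDEX/_WIDTH pair (bit position and bit count), and per-channel and per-queue register blocks are addressed as a base plus a fixed stride (DMA_CH_BASE 0x3100 plus 0x80 per channel, and likewise MTL queues from 0x1100). A sketch of how such definitions are typically consumed; the helper names are illustrative, not necessarily the ones the driver itself uses:

    /* Extract a field of 'width' bits starting at bit 'index'. */
    #define GET_BITS(var, index, width) \
            (((var) >> (index)) & ((0x1U << (width)) - 1))

    /* Replace the same field with 'val', leaving the other bits intact. */
    #define SET_BITS(var, index, width, val) do {                     \
            (var) &= ~(((0x1U << (width)) - 1) << (index));           \
            (var) |= ((val) & ((0x1U << (width)) - 1)) << (index);    \
    } while (0)

    /* Offset of per-channel register 'reg' for DMA channel 'ch':
     * 0x3100 + ch * 0x80 + reg, per the comments in the header below.
     */
    static inline unsigned int dma_ch_offset(unsigned int ch, unsigned int reg)
    {
            return DMA_CH_BASE + ch * DMA_CH_INC + reg;
    }

For example, GET_BITS(status, DMA_CH_SR_TI_INDEX, DMA_CH_SR_TI_WIDTH) tests the transmit-interrupt bit of a channel status register.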
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
new file mode 100644
index 0000000..bf462ee8
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -0,0 +1,1007 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XGBE_COMMON_H__
+#define __XGBE_COMMON_H__
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_AXIARCR 0x3010
+#define DMA_AXIAWCR 0x3018
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+#define DMA_DSR2 0x3028
+#define DMA_DSR3 0x302c
+#define DMA_DSR4 0x3030
+
+/* DMA register entry bit positions and sizes */
+#define DMA_AXIARCR_DRC_INDEX 0
+#define DMA_AXIARCR_DRC_WIDTH 4
+#define DMA_AXIARCR_DRD_INDEX 4
+#define DMA_AXIARCR_DRD_WIDTH 2
+#define DMA_AXIARCR_TEC_INDEX 8
+#define DMA_AXIARCR_TEC_WIDTH 4
+#define DMA_AXIARCR_TED_INDEX 12
+#define DMA_AXIARCR_TED_WIDTH 2
+#define DMA_AXIARCR_THC_INDEX 16
+#define DMA_AXIARCR_THC_WIDTH 4
+#define DMA_AXIARCR_THD_INDEX 20
+#define DMA_AXIARCR_THD_WIDTH 2
+#define DMA_AXIAWCR_DWC_INDEX 0
+#define DMA_AXIAWCR_DWC_WIDTH 4
+#define DMA_AXIAWCR_DWD_INDEX 4
+#define DMA_AXIAWCR_DWD_WIDTH 2
+#define DMA_AXIAWCR_RPC_INDEX 8
+#define DMA_AXIAWCR_RPC_WIDTH 4
+#define DMA_AXIAWCR_RPD_INDEX 12
+#define DMA_AXIAWCR_RPD_WIDTH 2
+#define DMA_AXIAWCR_RHC_INDEX 16
+#define DMA_AXIAWCR_RHC_WIDTH 4
+#define DMA_AXIAWCR_RHD_INDEX 20
+#define DMA_AXIAWCR_RHD_WIDTH 2
+#define DMA_AXIAWCR_TDC_INDEX 24
+#define DMA_AXIAWCR_TDC_WIDTH 4
+#define DMA_AXIAWCR_TDD_INDEX 28
+#define DMA_AXIAWCR_TDD_WIDTH 2
+#define DMA_DSR0_RPS_INDEX 8
+#define DMA_DSR0_RPS_WIDTH 4
+#define DMA_DSR0_TPS_INDEX 12
+#define DMA_DSR0_TPS_WIDTH 4
+#define DMA_ISR_MACIS_INDEX 17
+#define DMA_ISR_MACIS_WIDTH 1
+#define DMA_ISR_MTLIS_INDEX 16
+#define DMA_ISR_MTLIS_WIDTH 1
+#define DMA_MR_SWR_INDEX 0
+#define DMA_MR_SWR_WIDTH 1
+#define DMA_SBMR_EAME_INDEX 11
+#define DMA_SBMR_EAME_WIDTH 1
+#define DMA_SBMR_UNDEF_INDEX 0
+#define DMA_SBMR_UNDEF_WIDTH 1
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_CATDR_LO 0x44
+#define DMA_CH_CARDR_LO 0x4c
+#define DMA_CH_CATBR_HI 0x50
+#define DMA_CH_CATBR_LO 0x54
+#define DMA_CH_CARBR_HI 0x58
+#define DMA_CH_CARBR_LO 0x5c
+#define DMA_CH_SR 0x60
+
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_INDEX 16
+#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_IER_AIE_INDEX 15
+#define DMA_CH_IER_AIE_WIDTH 1
+#define DMA_CH_IER_FBEE_INDEX 12
+#define DMA_CH_IER_FBEE_WIDTH 1
+#define DMA_CH_IER_NIE_INDEX 16
+#define DMA_CH_IER_NIE_WIDTH 1
+#define DMA_CH_IER_RBUE_INDEX 7
+#define DMA_CH_IER_RBUE_WIDTH 1
+#define DMA_CH_IER_RIE_INDEX 6
+#define DMA_CH_IER_RIE_WIDTH 1
+#define DMA_CH_IER_RSE_INDEX 8
+#define DMA_CH_IER_RSE_WIDTH 1
+#define DMA_CH_IER_TBUE_INDEX 2
+#define DMA_CH_IER_TBUE_WIDTH 1
+#define DMA_CH_IER_TIE_INDEX 0
+#define DMA_CH_IER_TIE_WIDTH 1
+#define DMA_CH_IER_TXSE_INDEX 1
+#define DMA_CH_IER_TXSE_WIDTH 1
+#define DMA_CH_RCR_PBL_INDEX 16
+#define DMA_CH_RCR_PBL_WIDTH 6
+#define DMA_CH_RCR_RBSZ_INDEX 1
+#define DMA_CH_RCR_RBSZ_WIDTH 14
+#define DMA_CH_RCR_SR_INDEX 0
+#define DMA_CH_RCR_SR_WIDTH 1
+#define DMA_CH_RIWT_RWT_INDEX 0
+#define DMA_CH_RIWT_RWT_WIDTH 8
+#define DMA_CH_SR_FBE_INDEX 12
+#define DMA_CH_SR_FBE_WIDTH 1
+#define DMA_CH_SR_RBU_INDEX 7
+#define DMA_CH_SR_RBU_WIDTH 1
+#define DMA_CH_SR_RI_INDEX 6
+#define DMA_CH_SR_RI_WIDTH 1
+#define DMA_CH_SR_RPS_INDEX 8
+#define DMA_CH_SR_RPS_WIDTH 1
+#define DMA_CH_SR_TBU_INDEX 2
+#define DMA_CH_SR_TBU_WIDTH 1
+#define DMA_CH_SR_TI_INDEX 0
+#define DMA_CH_SR_TI_WIDTH 1
+#define DMA_CH_SR_TPS_INDEX 1
+#define DMA_CH_SR_TPS_WIDTH 1
+#define DMA_CH_TCR_OSP_INDEX 4
+#define DMA_CH_TCR_OSP_WIDTH 1
+#define DMA_CH_TCR_PBL_INDEX 16
+#define DMA_CH_TCR_PBL_WIDTH 6
+#define DMA_CH_TCR_ST_INDEX 0
+#define DMA_CH_TCR_ST_WIDTH 1
+#define DMA_CH_TCR_TSE_INDEX 12
+#define DMA_CH_TCR_TSE_WIDTH 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64 /* 8 x 8 */
+#define DMA_PBL_128 128 /* 8 x 16 */
+#define DMA_PBL_256 256 /* 8 x 32 */
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_WTR 0x000c
+#define MAC_HTR0 0x0010
+#define MAC_HTR1 0x0014
+#define MAC_HTR2 0x0018
+#define MAC_HTR3 0x001c
+#define MAC_HTR4 0x0020
+#define MAC_HTR5 0x0024
+#define MAC_HTR6 0x0028
+#define MAC_HTR7 0x002c
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_IVLANIR 0x0064
+#define MAC_RETMR 0x006c
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_RTSR 0x00b8
+#define MAC_PMTCSR 0x00c0
+#define MAC_RWKPFR 0x00c4
+#define MAC_LPICSR 0x00d0
+#define MAC_LPITCR 0x00d4
+#define MAC_VR 0x0110
+#define MAC_DR 0x0114
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_GPIOCR 0x0278
+#define MAC_GPIOSR 0x027c
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
+#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
+#define MAC_HWF0R_ARPOFFSEL_INDEX 9
+#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
+#define MAC_HWF0R_EEESEL_INDEX 13
+#define MAC_HWF0R_EEESEL_WIDTH 1
+#define MAC_HWF0R_GMIISEL_INDEX 1
+#define MAC_HWF0R_GMIISEL_WIDTH 1
+#define MAC_HWF0R_MGKSEL_INDEX 7
+#define MAC_HWF0R_MGKSEL_WIDTH 1
+#define MAC_HWF0R_MMCSEL_INDEX 8
+#define MAC_HWF0R_MMCSEL_WIDTH 1
+#define MAC_HWF0R_RWKSEL_INDEX 6
+#define MAC_HWF0R_RWKSEL_WIDTH 1
+#define MAC_HWF0R_RXCOESEL_INDEX 16
+#define MAC_HWF0R_RXCOESEL_WIDTH 1
+#define MAC_HWF0R_SAVLANINS_INDEX 27
+#define MAC_HWF0R_SAVLANINS_WIDTH 1
+#define MAC_HWF0R_SMASEL_INDEX 5
+#define MAC_HWF0R_SMASEL_WIDTH 1
+#define MAC_HWF0R_TSSEL_INDEX 12
+#define MAC_HWF0R_TSSEL_WIDTH 1
+#define MAC_HWF0R_TSSTSSEL_INDEX 25
+#define MAC_HWF0R_TSSTSSEL_WIDTH 2
+#define MAC_HWF0R_TXCOESEL_INDEX 14
+#define MAC_HWF0R_TXCOESEL_WIDTH 1
+#define MAC_HWF0R_VLHASH_INDEX 4
+#define MAC_HWF0R_VLHASH_WIDTH 1
+#define MAC_HWF1R_ADVTHWORD_INDEX 13
+#define MAC_HWF1R_ADVTHWORD_WIDTH 1
+#define MAC_HWF1R_DBGMEMA_INDEX 19
+#define MAC_HWF1R_DBGMEMA_WIDTH 1
+#define MAC_HWF1R_DCBEN_INDEX 16
+#define MAC_HWF1R_DCBEN_WIDTH 1
+#define MAC_HWF1R_HASHTBLSZ_INDEX 24
+#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
+#define MAC_HWF1R_L3L4FNUM_INDEX 27
+#define MAC_HWF1R_L3L4FNUM_WIDTH 4
+#define MAC_HWF1R_RSSEN_INDEX 20
+#define MAC_HWF1R_RSSEN_WIDTH 1
+#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
+#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
+#define MAC_HWF1R_SPHEN_INDEX 17
+#define MAC_HWF1R_SPHEN_WIDTH 1
+#define MAC_HWF1R_TSOEN_INDEX 18
+#define MAC_HWF1R_TSOEN_WIDTH 1
+#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
+#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
+#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
+#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
+#define MAC_HWF2R_PPSOUTNUM_INDEX 24
+#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
+#define MAC_HWF2R_RXCHCNT_INDEX 12
+#define MAC_HWF2R_RXCHCNT_WIDTH 4
+#define MAC_HWF2R_RXQCNT_INDEX 0
+#define MAC_HWF2R_RXQCNT_WIDTH 4
+#define MAC_HWF2R_TXCHCNT_INDEX 18
+#define MAC_HWF2R_TXCHCNT_WIDTH 4
+#define MAC_HWF2R_TXQCNT_INDEX 6
+#define MAC_HWF2R_TXQCNT_WIDTH 4
+#define MAC_ISR_MMCRXIS_INDEX 9
+#define MAC_ISR_MMCRXIS_WIDTH 1
+#define MAC_ISR_MMCTXIS_INDEX 10
+#define MAC_ISR_MMCTXIS_WIDTH 1
+#define MAC_ISR_PMTIS_INDEX 4
+#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_MACA1HR_AE_INDEX 31
+#define MAC_MACA1HR_AE_WIDTH 1
+#define MAC_PFR_HMC_INDEX 2
+#define MAC_PFR_HMC_WIDTH 1
+#define MAC_PFR_HUC_INDEX 1
+#define MAC_PFR_HUC_WIDTH 1
+#define MAC_PFR_PM_INDEX 4
+#define MAC_PFR_PM_WIDTH 1
+#define MAC_PFR_PR_INDEX 0
+#define MAC_PFR_PR_WIDTH 1
+#define MAC_PMTCSR_MGKPKTEN_INDEX 1
+#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
+#define MAC_PMTCSR_PWRDWN_INDEX 0
+#define MAC_PMTCSR_PWRDWN_WIDTH 1
+#define MAC_PMTCSR_RWKFILTRST_INDEX 31
+#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
+#define MAC_PMTCSR_RWKPKTEN_INDEX 2
+#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
+#define MAC_Q0TFCR_PT_INDEX 16
+#define MAC_Q0TFCR_PT_WIDTH 16
+#define MAC_Q0TFCR_TFE_INDEX 1
+#define MAC_Q0TFCR_TFE_WIDTH 1
+#define MAC_RCR_ACS_INDEX 1
+#define MAC_RCR_ACS_WIDTH 1
+#define MAC_RCR_CST_INDEX 2
+#define MAC_RCR_CST_WIDTH 1
+#define MAC_RCR_DCRCC_INDEX 3
+#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_IPC_INDEX 9
+#define MAC_RCR_IPC_WIDTH 1
+#define MAC_RCR_JE_INDEX 8
+#define MAC_RCR_JE_WIDTH 1
+#define MAC_RCR_LM_INDEX 10
+#define MAC_RCR_LM_WIDTH 1
+#define MAC_RCR_RE_INDEX 0
+#define MAC_RCR_RE_WIDTH 1
+#define MAC_RFCR_RFE_INDEX 0
+#define MAC_RFCR_RFE_WIDTH 1
+#define MAC_RQC0R_RXQ0EN_INDEX 0
+#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_TCR_SS_INDEX 29
+#define MAC_TCR_SS_WIDTH 2
+#define MAC_TCR_TE_INDEX 0
+#define MAC_TCR_TE_WIDTH 1
+#define MAC_VLANTR_DOVLTC_INDEX 20
+#define MAC_VLANTR_DOVLTC_WIDTH 1
+#define MAC_VLANTR_ERSVLM_INDEX 19
+#define MAC_VLANTR_ERSVLM_WIDTH 1
+#define MAC_VLANTR_ESVL_INDEX 18
+#define MAC_VLANTR_ESVL_WIDTH 1
+#define MAC_VLANTR_EVLS_INDEX 21
+#define MAC_VLANTR_EVLS_WIDTH 2
+#define MAC_VLANTR_EVLRXS_INDEX 24
+#define MAC_VLANTR_EVLRXS_WIDTH 1
+#define MAC_VR_DEVID_INDEX 8
+#define MAC_VR_DEVID_WIDTH 8
+#define MAC_VR_SNPSVER_INDEX 0
+#define MAC_VR_SNPSVER_WIDTH 8
+#define MAC_VR_USERVER_INDEX 16
+#define MAC_VR_USERVER_WIDTH 8
+
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXOCTETCOUNT_GB_HI 0x0818
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXFRAMECOUNT_GB_HI 0x0820
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX64OCTETS_GB_HI 0x0838
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX65TO127OCTETS_GB_HI 0x0840
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX128TO255OCTETS_GB_HI 0x0848
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX256TO511OCTETS_GB_HI 0x0850
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXUNDERFLOWERROR_HI 0x0880
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXOCTETCOUNT_G_HI 0x0888
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXFRAMECOUNT_G_HI 0x0890
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXPAUSEFRAMES_HI 0x0898
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_TXVLANFRAMES_G_HI 0x08a0
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXFRAMECOUNT_GB_HI 0x0904
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_GB_HI 0x090c
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXOCTETCOUNT_G_HI 0x0914
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXCRCERROR_HI 0x092c
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX64OCTETS_GB_HI 0x0944
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX65TO127OCTETS_GB_HI 0x094c
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX128TO255OCTETS_GB_HI 0x0954
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX256TO511OCTETS_GB_HI 0x095c
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXUNICASTFRAMES_G_HI 0x0974
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXLENGTHERROR_HI 0x097c
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXOUTOFRANGETYPE_HI 0x0984
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXPAUSEFRAMES_HI 0x098c
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXFIFOOVERFLOW_HI 0x0994
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXVLANFRAMES_GB_HI 0x099c
+#define MMC_RXWATCHDOGERROR 0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_INDEX 0
+#define MMC_CR_CR_WIDTH 1
+#define MMC_CR_CSR_INDEX 1
+#define MMC_CR_CSR_WIDTH 1
+#define MMC_CR_ROR_INDEX 2
+#define MMC_CR_ROR_WIDTH 1
+#define MMC_CR_MCF_INDEX 3
+#define MMC_CR_MCF_WIDTH 1
+#define MMC_CR_MCT_INDEX 4
+#define MMC_CR_MCT_WIDTH 2
+#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
+#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
+#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
+#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
+#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
+#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
+#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
+#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXCRCERROR_INDEX 5
+#define MMC_RISR_RXCRCERROR_WIDTH 1
+#define MMC_RISR_RXRUNTERROR_INDEX 6
+#define MMC_RISR_RXRUNTERROR_WIDTH 1
+#define MMC_RISR_RXJABBERERROR_INDEX 7
+#define MMC_RISR_RXJABBERERROR_WIDTH 1
+#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
+#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
+#define MMC_RISR_RXOVERSIZE_G_INDEX 9
+#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
+#define MMC_RISR_RX64OCTETS_GB_INDEX 10
+#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
+#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
+#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
+#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
+#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
+#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXLENGTHERROR_INDEX 17
+#define MMC_RISR_RXLENGTHERROR_WIDTH 1
+#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
+#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
+#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
+#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
+#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
+#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
+#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
+#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
+#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
+#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
+#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
+#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
+#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
+#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
+#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
+#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TX64OCTETS_GB_INDEX 4
+#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
+#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
+#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
+#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
+#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
+#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
+#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
+#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
+#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
+#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
+#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
+#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
+#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
+#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
+
+/* MTL register offsets */
+#define MTL_OMR 0x1000
+#define MTL_FDCR 0x1008
+#define MTL_FDSR 0x100c
+#define MTL_FDDR 0x1010
+#define MTL_ISR 0x1020
+#define MTL_RQDCM0R 0x1030
+#define MTL_TCPM0R 0x1040
+#define MTL_TCPM1R 0x1044
+
+#define MTL_RQDCM_INC 4
+#define MTL_RQDCM_Q_PER_REG 4
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_INDEX 5
+#define MTL_OMR_ETSALG_WIDTH 2
+#define MTL_OMR_RAA_INDEX 2
+#define MTL_OMR_RAA_WIDTH 1
+
+/* MTL queue register offsets
+ * Multiple queues can be active. The first queue has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_TQUR 0x04
+#define MTL_Q_TQDR 0x08
+#define MTL_Q_TCECR 0x10
+#define MTL_Q_TCESR 0x14
+#define MTL_Q_TCQWR 0x18
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQMPOCR 0x44
+#define MTL_Q_RQDR 0x4c
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_TCQWR_QW_INDEX 0
+#define MTL_Q_TCQWR_QW_WIDTH 21
+#define MTL_Q_RQOMR_EHFC_INDEX 7
+#define MTL_Q_RQOMR_EHFC_WIDTH 1
+#define MTL_Q_RQOMR_RFA_INDEX 8
+#define MTL_Q_RQOMR_RFA_WIDTH 3
+#define MTL_Q_RQOMR_RFD_INDEX 13
+#define MTL_Q_RQOMR_RFD_WIDTH 3
+#define MTL_Q_RQOMR_RQS_INDEX 16
+#define MTL_Q_RQOMR_RQS_WIDTH 9
+#define MTL_Q_RQOMR_RSF_INDEX 5
+#define MTL_Q_RQOMR_RSF_WIDTH 1
+#define MTL_Q_RQOMR_RTC_INDEX 0
+#define MTL_Q_RQOMR_RTC_WIDTH 2
+#define MTL_Q_TQOMR_FTQ_INDEX 0
+#define MTL_Q_TQOMR_FTQ_WIDTH 1
+#define MTL_Q_TQOMR_TQS_INDEX 16
+#define MTL_Q_TQOMR_TQS_WIDTH 10
+#define MTL_Q_TQOMR_TSF_INDEX 1
+#define MTL_Q_TQOMR_TSF_WIDTH 1
+#define MTL_Q_TQOMR_TTC_INDEX 4
+#define MTL_Q_TQOMR_TTC_WIDTH 3
+#define MTL_Q_TQOMR_TXQEN_INDEX 2
+#define MTL_Q_TQOMR_TXQEN_WIDTH 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_32 0x01
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent class has registers that
+ * are accessed using an offset of 0x80 from the previous class.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
+
+#define MTL_TC_ETSCR 0x10
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_INDEX 0
+#define MTL_TC_ETSCR_TSA_WIDTH 2
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+
+/* PCS MMD select register offset
+ * The MMD select register is used for accessing PCS registers
+ * when the underlying APB3 interface is using indirect addressing.
+ * Indirect addressing requires accessing registers in two phases,
+ * an address phase and a data phase. The address phase requires
+ * writing an address selection value to the MMD select register.
+ */
+#define PCS_MMD_SELECT 0xff
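+
+/* Hedged sketch of the two-phase access described above; the real
+ * phase handling lives in the driver's MMD read/write routines, and
+ * upper_bits/lower_offset below are placeholder names (the shift by 2
+ * assumes 32-bit based mmio offsets):
+ *
+ *	XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, upper_bits);
+ *	data = XPCS_IOREAD(pdata, lower_offset);
+ */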
+
+
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_INDEX 2
+#define RX_PACKET_ERRORS_CRC_WIDTH 1
+#define RX_PACKET_ERRORS_FRAME_INDEX 3
+#define RX_PACKET_ERRORS_FRAME_WIDTH 1
+#define RX_PACKET_ERRORS_LENGTH_INDEX 0
+#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
+#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
+#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+
+#define RX_NORMAL_DESC0_OVT_INDEX 0
+#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC3_ES_INDEX 15
+#define RX_NORMAL_DESC3_ES_WIDTH 1
+#define RX_NORMAL_DESC3_ETLT_INDEX 16
+#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_INTE_INDEX 30
+#define RX_NORMAL_DESC3_INTE_WIDTH 1
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
+#define RX_NORMAL_DESC3_OWN_INDEX 31
+#define RX_NORMAL_DESC3_OWN_WIDTH 1
+#define RX_NORMAL_DESC3_PL_INDEX 0
+#define RX_NORMAL_DESC3_PL_WIDTH 14
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+
+#define TX_CONTEXT_DESC2_MSS_INDEX 0
+#define TX_CONTEXT_DESC2_MSS_WIDTH 15
+#define TX_CONTEXT_DESC3_CTXT_INDEX 30
+#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
+#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
+#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
+#define TX_CONTEXT_DESC3_VLTV_INDEX 16
+#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
+#define TX_CONTEXT_DESC3_VT_INDEX 0
+#define TX_CONTEXT_DESC3_VT_WIDTH 16
+
+#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
+#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
+#define TX_NORMAL_DESC2_IC_INDEX 31
+#define TX_NORMAL_DESC2_IC_WIDTH 1
+#define TX_NORMAL_DESC2_VTIR_INDEX 14
+#define TX_NORMAL_DESC2_VTIR_WIDTH 2
+#define TX_NORMAL_DESC3_CIC_INDEX 16
+#define TX_NORMAL_DESC3_CIC_WIDTH 2
+#define TX_NORMAL_DESC3_CPC_INDEX 26
+#define TX_NORMAL_DESC3_CPC_WIDTH 2
+#define TX_NORMAL_DESC3_CTXT_INDEX 30
+#define TX_NORMAL_DESC3_CTXT_WIDTH 1
+#define TX_NORMAL_DESC3_FD_INDEX 29
+#define TX_NORMAL_DESC3_FD_WIDTH 1
+#define TX_NORMAL_DESC3_FL_INDEX 0
+#define TX_NORMAL_DESC3_FL_WIDTH 15
+#define TX_NORMAL_DESC3_LD_INDEX 28
+#define TX_NORMAL_DESC3_LD_WIDTH 1
+#define TX_NORMAL_DESC3_OWN_INDEX 31
+#define TX_NORMAL_DESC3_OWN_WIDTH 1
+#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
+#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
+#define TX_NORMAL_DESC3_TCPPL_INDEX 0
+#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
+#define TX_NORMAL_DESC3_TSE_INDEX 18
+#define TX_NORMAL_DESC3_TSE_WIDTH 1
+
+#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+
+/* MDIO undefined or vendor specific registers */
+#ifndef MDIO_AN_COMP_STAT
+#define MDIO_AN_COMP_STAT 0x0030
+#endif
+
+
+/* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+ *
+ * The set macro will clear the current bit field value within the
+ * variable and then set the bit field of the variable to the
+ * specified value
+ */
+#define GET_BITS(_var, _index, _width) \
+ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS(_var, _index, _width, _val) \
+do { \
+ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
+} while (0)
+
+#define GET_BITS_LE(_var, _index, _width) \
+ ((le32_to_cpu((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS_LE(_var, _index, _width, _val) \
+do { \
+ (_var) &= cpu_to_le32(~(((0x1 << (_width)) - 1) << (_index))); \
+ (_var) |= cpu_to_le32((((_val) & \
+ ((0x1 << (_width)) - 1)) << (_index))); \
+} while (0)
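+
+/* Worked example (values are illustrative only): with _var = 0x1234,
+ * GET_BITS(_var, 4, 8) yields (0x1234 >> 4) & 0xff = 0x23, and
+ * SET_BITS(_var, 4, 8, 0xab) leaves _var = 0x1ab4 (bits 4-11 replaced,
+ * all other bits preserved).
+ */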
+
+
+/* Bit setting and getting macros based on register fields
+ * The get macro uses the bit field definitions formed using the input
+ * names to extract the current bit field value from within the
+ * variable
+ *
+ * The set macro uses the bit field definitions formed using the input
+ * names to set the bit field of the variable to the specified value
+ */
+#define XGMAC_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XGMAC_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XGMAC_GET_BITS_LE(_var, _prefix, _field) \
+ GET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
+ SET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
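+
+/* Expansion example (field taken from the definitions above): the call
+ *
+ *	XGMAC_GET_BITS(attrs, TX_PACKET_ATTRIBUTES, TSO_ENABLE)
+ *
+ * pastes the tokens into
+ *
+ *	GET_BITS(attrs, TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX,
+ *		 TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH)
+ *
+ * i.e. GET_BITS(attrs, 1, 1), a single bit at position 1.
+ */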
+
+
+/* Macros for reading or writing registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define XGMAC_IOREAD(_pdata, _reg) \
+ ioread32((_pdata)->xgmac_regs + _reg)
+
+#define XGMAC_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XGMAC_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_IOWRITE(_pdata, _reg, _val) \
+ iowrite32((_val), (_pdata)->xgmac_regs + _reg)
+
+#define XGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
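+
+/* Usage sketch (illustrative only): a read-modify-write of a single
+ * field, here the MTL_OMR RAA bit, leaving all other bits of the
+ * register untouched:
+ *
+ *	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+ */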
+
+
+/* Macros for reading or writing MTL queue or traffic class registers
+ * Similar to the standard read and write macros except that the
+ * base register value is calculated by the queue or traffic class number
+ */
+#define XGMAC_MTL_IOREAD(_pdata, _n, _reg) \
+ ioread32((_pdata)->xgmac_regs + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+
+#define XGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
+ GET_BITS(XGMAC_MTL_IOREAD((_pdata), (_n), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
+ iowrite32((_val), (_pdata)->xgmac_regs + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + _reg)
+
+#define XGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
+} while (0)
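+
+/* Usage sketch (queue number hypothetical): flushing the Tx queue of
+ * MTL queue 0 would read TQOMR at MTL_Q_BASE + 0x00, set the FTQ
+ * field, and write the value back:
+ *
+ *	XGMAC_MTL_IOWRITE_BITS(pdata, 0, MTL_Q_TQOMR, FTQ, 1);
+ *
+ * The DMA channel macros below follow the same pattern, with the base
+ * taken from the channel's dma_regs pointer instead.
+ */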
+
+
+/* Macros for reading or writing DMA channel registers
+ * Similar to the standard read and write macros except that the
+ * base register value is obtained from the ring
+ */
+#define XGMAC_DMA_IOREAD(_channel, _reg) \
+ ioread32((_channel)->dma_regs + _reg)
+
+#define XGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
+ GET_BITS(XGMAC_DMA_IOREAD((_channel), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XGMAC_DMA_IOWRITE(_channel, _reg, _val) \
+ iowrite32((_val), (_channel)->dma_regs + _reg)
+
+#define XGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
+do { \
+ u32 reg_val = XGMAC_DMA_IOREAD((_channel), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
+} while (0)
+
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of XPCS registers.
+ */
+#define XPCS_IOWRITE(_pdata, _off, _val) \
+ iowrite32(_val, (_pdata)->xpcs_regs + (_off))
+
+#define XPCS_IOREAD(_pdata, _off) \
+ ioread32((_pdata)->xpcs_regs + (_off))
+
+
+/* Macros for building, reading or writing register values or bits
+ * using MDIO. Different from above because of the use of standardized
+ * Linux include values. No shifting is performed with the bit
+ * operations, everything works on mask values.
+ */
+#define XMDIO_READ(_pdata, _mmd, _reg) \
+ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff)))
+
+#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
+ (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
+
+#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
+ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | (_mmd << 16) | ((_reg) & 0xffff), (_val)))
+
+#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
+do { \
+ u32 mmd_val = XMDIO_READ((_pdata), _mmd, _reg); \
+ mmd_val &= ~_mask; \
+ mmd_val |= (_val); \
+ XMDIO_WRITE((_pdata), _mmd, _reg, mmd_val); \
+} while (0)
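+
+/* Clause-45 address packing example (MDIO_MMD_PCS and MDIO_STAT1 are
+ * the standard values from <linux/mdio.h>): reading the PCS status
+ * register packs MII_ADDR_C45 | (MDIO_MMD_PCS << 16) | MDIO_STAT1,
+ * i.e. the MMD in bits 16-20 and the register in the low 16 bits:
+ *
+ *	status = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ */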
+
+#endif
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
new file mode 100644
index 0000000..6bb76d5
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
@@ -0,0 +1,375 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static ssize_t xgbe_common_read(char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int value)
+{
+ char *buf;
+ ssize_t len;
+
+ if (*ppos != 0)
+ return 0;
+
+ buf = kasprintf(GFP_KERNEL, "0x%08x\n", value);
+ if (!buf)
+ return -ENOMEM;
+
+ if (count < strlen(buf)) {
+ kfree(buf);
+ return -ENOSPC;
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
+ kfree(buf);
+
+ return len;
+}
+
+static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
+ loff_t *ppos, unsigned int *value)
+{
+ char workarea[32];
+ ssize_t len;
+ unsigned int scan_value;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (count >= sizeof(workarea))
+ return -ENOSPC;
+
+ len = simple_write_to_buffer(workarea, sizeof(workarea) - 1, ppos,
+ buffer, count);
+ if (len < 0)
+ return len;
+
+ workarea[len] = '\0';
+ if (sscanf(workarea, "%x", &scan_value) == 1)
+ *value = scan_value;
+ else
+ return -EIO;
+
+ return len;
+}
+
+static ssize_t xgmac_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xgmac_reg);
+}
+
+static ssize_t xgmac_reg_addr_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xgmac_reg);
+}
+
+static ssize_t xgmac_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = XGMAC_IOREAD(pdata, pdata->debugfs_xgmac_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xgmac_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ XGMAC_IOWRITE(pdata, pdata->debugfs_xgmac_reg, value);
+
+ return len;
+}
+
+static const struct file_operations xgmac_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xgmac_reg_addr_read,
+ .write = xgmac_reg_addr_write,
+};
+
+static const struct file_operations xgmac_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xgmac_reg_value_read,
+ .write = xgmac_reg_value_write,
+};
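+
+/* Usage sketch (interface name "eth0" is hypothetical): write a
+ * register offset, in hex, to the address file, then read or write
+ * the value file to access that register from user space, e.g.:
+ *
+ *	echo 0x1000 > /sys/kernel/debug/amd-xgbe-eth0/xgmac_register
+ *	cat /sys/kernel/debug/amd-xgbe-eth0/xgmac_register_value
+ */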
+
+static ssize_t xpcs_mmd_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_mmd);
+}
+
+static ssize_t xpcs_mmd_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xpcs_mmd);
+}
+
+static ssize_t xpcs_reg_addr_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_read(buffer, count, ppos, pdata->debugfs_xpcs_reg);
+}
+
+static ssize_t xpcs_reg_addr_write(struct file *filp, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+
+ return xgbe_common_write(buffer, count, ppos,
+ &pdata->debugfs_xpcs_reg);
+}
+
+static ssize_t xpcs_reg_value_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+
+ value = pdata->hw_if.read_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
+ pdata->debugfs_xpcs_reg);
+
+ return xgbe_common_read(buffer, count, ppos, value);
+}
+
+static ssize_t xpcs_reg_value_write(struct file *filp,
+ const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct xgbe_prv_data *pdata = filp->private_data;
+ unsigned int value;
+ ssize_t len;
+
+ len = xgbe_common_write(buffer, count, ppos, &value);
+ if (len < 0)
+ return len;
+
+ pdata->hw_if.write_mmd_regs(pdata, pdata->debugfs_xpcs_mmd,
+ pdata->debugfs_xpcs_reg, value);
+
+ return len;
+}
+
+static const struct file_operations xpcs_mmd_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_mmd_read,
+ .write = xpcs_mmd_write,
+};
+
+static const struct file_operations xpcs_reg_addr_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_reg_addr_read,
+ .write = xpcs_reg_addr_write,
+};
+
+static const struct file_operations xpcs_reg_value_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = xpcs_reg_value_read,
+ .write = xpcs_reg_value_write,
+};
+
+void xgbe_debugfs_init(struct xgbe_prv_data *pdata)
+{
+ struct dentry *pfile;
+ char *buf;
+
+ /* Set defaults */
+ pdata->debugfs_xgmac_reg = 0;
+ pdata->debugfs_xpcs_mmd = 1;
+ pdata->debugfs_xpcs_reg = 0;
+
+	buf = kasprintf(GFP_KERNEL, "amd-xgbe-%s", pdata->netdev->name);
+	if (!buf)
+		return;
+
+	pdata->xgbe_debugfs = debugfs_create_dir(buf, NULL);
+ if (pdata->xgbe_debugfs == NULL) {
+ netdev_err(pdata->netdev, "debugfs_create_dir failed\n");
+ return;
+ }
+
+ pfile = debugfs_create_file("xgmac_register", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_addr_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xgmac_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xgmac_reg_value_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_mmd", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_mmd_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_register", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_addr_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ pfile = debugfs_create_file("xpcs_register_value", 0600,
+ pdata->xgbe_debugfs, pdata,
+ &xpcs_reg_value_fops);
+ if (!pfile)
+ netdev_err(pdata->netdev, "debugfs_create_file failed\n");
+
+ kfree(buf);
+}
+
+void xgbe_debugfs_exit(struct xgbe_prv_data *pdata)
+{
+ debugfs_remove_recursive(pdata->xgbe_debugfs);
+ pdata->xgbe_debugfs = NULL;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-desc.c b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
new file mode 100644
index 0000000..6f1c859
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-desc.c
@@ -0,0 +1,556 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static void xgbe_unmap_skb(struct xgbe_prv_data *, struct xgbe_ring_data *);
+
+static void xgbe_free_ring(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring)
+{
+ struct xgbe_ring_data *rdata;
+ unsigned int i;
+
+ if (!ring)
+ return;
+
+ if (ring->rdata) {
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = GET_DESC_DATA(ring, i);
+ xgbe_unmap_skb(pdata, rdata);
+ }
+
+ kfree(ring->rdata);
+ ring->rdata = NULL;
+ }
+
+ if (ring->rdesc) {
+ dma_free_coherent(pdata->dev,
+ (sizeof(struct xgbe_ring_desc) *
+ ring->rdesc_count),
+ ring->rdesc, ring->rdesc_dma);
+ ring->rdesc = NULL;
+ }
+}
+
+static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_free_ring_resources\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ xgbe_free_ring(pdata, channel->tx_ring);
+ xgbe_free_ring(pdata, channel->rx_ring);
+ }
+
+ DBGPR("<--xgbe_free_ring_resources\n");
+}
+
+static int xgbe_init_ring(struct xgbe_prv_data *pdata,
+ struct xgbe_ring *ring, unsigned int rdesc_count)
+{
+ DBGPR("-->xgbe_init_ring\n");
+
+ if (!ring)
+ return 0;
+
+ /* Descriptors */
+ ring->rdesc_count = rdesc_count;
+ ring->rdesc = dma_alloc_coherent(pdata->dev,
+ (sizeof(struct xgbe_ring_desc) *
+ rdesc_count), &ring->rdesc_dma,
+ GFP_KERNEL);
+ if (!ring->rdesc)
+ return -ENOMEM;
+
+ /* Descriptor information */
+ ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
+ GFP_KERNEL);
+ if (!ring->rdata)
+ return -ENOMEM;
+
+ DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
+ ring->rdesc, ring->rdesc_dma, ring->rdata);
+
+ DBGPR("<--xgbe_init_ring\n");
+
+ return 0;
+}
+
+static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+ int ret;
+
+ DBGPR("-->xgbe_alloc_ring_resources\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ DBGPR(" %s - tx_ring:\n", channel->name);
+ ret = xgbe_init_ring(pdata, channel->tx_ring,
+ pdata->tx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+ "error initializing Tx ring\n");
+ goto err_ring;
+ }
+
+ DBGPR(" %s - rx_ring:\n", channel->name);
+ ret = xgbe_init_ring(pdata, channel->rx_ring,
+ pdata->rx_desc_count);
+ if (ret) {
+ netdev_alert(pdata->netdev,
+				     "error initializing Rx ring\n");
+ goto err_ring;
+ }
+ }
+
+ DBGPR("<--xgbe_alloc_ring_resources\n");
+
+ return 0;
+
+err_ring:
+ xgbe_free_ring_resources(pdata);
+
+ return ret;
+}
+
+static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ dma_addr_t rdesc_dma;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ rdesc = ring->rdesc;
+ rdesc_dma = ring->rdesc_dma;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = GET_DESC_DATA(ring, j);
+
+ rdata->rdesc = rdesc;
+ rdata->rdesc_dma = rdesc_dma;
+
+ rdesc++;
+ rdesc_dma += sizeof(struct xgbe_ring_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+ ring->tx.queue_stopped = 0;
+
+ hw_if->tx_desc_init(channel);
+ }
+
+ DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
+}
+
+static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_ring_data *rdata;
+ dma_addr_t rdesc_dma, skb_dma;
+ struct sk_buff *skb = NULL;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ rdesc = ring->rdesc;
+ rdesc_dma = ring->rdesc_dma;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = GET_DESC_DATA(ring, j);
+
+ rdata->rdesc = rdesc;
+ rdata->rdesc_dma = rdesc_dma;
+
+ /* Allocate skb & assign to each rdesc */
+ skb = dev_alloc_skb(pdata->rx_buf_size);
+ if (skb == NULL)
+ break;
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ pdata->rx_buf_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+					     "dma_map_single failed\n");
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ rdata->skb = skb;
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = pdata->rx_buf_size;
+
+ rdesc++;
+ rdesc_dma += sizeof(struct xgbe_ring_desc);
+ }
+
+ ring->cur = 0;
+ ring->dirty = 0;
+ ring->rx.realloc_index = 0;
+ ring->rx.realloc_threshold = 0;
+
+ hw_if->rx_desc_init(channel);
+ }
+
+ DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
+}
+
+static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
+ struct xgbe_ring_data *rdata)
+{
+ if (rdata->skb_dma) {
+ if (rdata->mapped_as_page) {
+ dma_unmap_page(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_TO_DEVICE);
+ } else {
+ dma_unmap_single(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_TO_DEVICE);
+ }
+ rdata->skb_dma = 0;
+ rdata->skb_dma_len = 0;
+ }
+
+ if (rdata->skb) {
+ dev_kfree_skb_any(rdata->skb);
+ rdata->skb = NULL;
+ }
+
+ rdata->tso_header = 0;
+ rdata->len = 0;
+ rdata->interrupt = 0;
+ rdata->mapped_as_page = 0;
+}
+
+static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_packet_data *packet;
+ struct skb_frag_struct *frag;
+ dma_addr_t skb_dma;
+ unsigned int start_index, cur_index;
+ unsigned int offset, tso, vlan, datalen, len;
+ unsigned int i;
+
+ DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);
+
+ offset = 0;
+ start_index = ring->cur;
+ cur_index = ring->cur;
+
+ packet = &ring->packet_data;
+ packet->rdesc_count = 0;
+ packet->length = 0;
+
+ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE);
+ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG);
+
+ /* Save space for a context descriptor if needed */
+ if ((tso && (packet->mss != ring->tx.cur_mss)) ||
+ (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
+ cur_index++;
+ rdata = GET_DESC_DATA(ring, cur_index);
+
+ if (tso) {
+ DBGPR(" TSO packet\n");
+
+ /* Map the TSO header */
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ packet->header_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = packet->header_len;
+ rdata->tso_header = 1;
+
+ offset = packet->header_len;
+
+ packet->length += packet->header_len;
+
+ cur_index++;
+ rdata = GET_DESC_DATA(ring, cur_index);
+ }
+
+ /* Map the (remainder of the) packet */
+ for (datalen = skb_headlen(skb) - offset; datalen; ) {
+ len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+
+ skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev, "dma_map_single failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = len;
+ DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
+ cur_index, skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ packet->length += len;
+
+ cur_index++;
+ rdata = GET_DESC_DATA(ring, cur_index);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ DBGPR(" mapping frag %u\n", i);
+
+ frag = &skb_shinfo(skb)->frags[i];
+ offset = 0;
+
+ for (datalen = skb_frag_size(frag); datalen; ) {
+ len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+
+ skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+ "skb_frag_dma_map failed\n");
+ goto err_out;
+ }
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = len;
+ rdata->mapped_as_page = 1;
+ DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
+ cur_index, skb_dma, len);
+
+ datalen -= len;
+ offset += len;
+
+ packet->length += len;
+
+ cur_index++;
+ rdata = GET_DESC_DATA(ring, cur_index);
+ }
+ }
+
+ /* Save the skb address in the last entry */
+ rdata->skb = skb;
+
+ /* Save the number of descriptor entries used */
+ packet->rdesc_count = cur_index - start_index;
+
+ DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);
+
+ return packet->rdesc_count;
+
+err_out:
+ while (start_index < cur_index) {
+ rdata = GET_DESC_DATA(ring, start_index++);
+ xgbe_unmap_skb(pdata, rdata);
+ }
+
+ DBGPR("<--xgbe_map_tx_skb: count=0\n");
+
+ return 0;
+}
+
+static void xgbe_realloc_skb(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct sk_buff *skb = NULL;
+ dma_addr_t skb_dma;
+ int i;
+
+ DBGPR("-->xgbe_realloc_skb: rx_ring->rx.realloc_index = %u\n",
+ ring->rx.realloc_index);
+
+ for (i = 0; i < ring->dirty; i++) {
+ rdata = GET_DESC_DATA(ring, ring->rx.realloc_index);
+
+ /* Reset rdata values */
+ xgbe_unmap_skb(pdata, rdata);
+
+ /* Allocate skb & assign to each rdesc */
+ skb = dev_alloc_skb(pdata->rx_buf_size);
+ if (skb == NULL) {
+ netdev_alert(pdata->netdev,
+ "failed to allocate skb\n");
+ break;
+ }
+ skb_dma = dma_map_single(pdata->dev, skb->data,
+ pdata->rx_buf_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(pdata->dev, skb_dma)) {
+ netdev_alert(pdata->netdev,
+				     "dma_map_single failed\n");
+ dev_kfree_skb_any(skb);
+ break;
+ }
+ rdata->skb = skb;
+ rdata->skb_dma = skb_dma;
+ rdata->skb_dma_len = pdata->rx_buf_size;
+
+ hw_if->rx_desc_reset(rdata);
+
+ ring->rx.realloc_index++;
+ }
+ ring->dirty = 0;
+
+ DBGPR("<--xgbe_realloc_skb\n");
+}
+
+void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
+{
+ DBGPR("-->xgbe_init_function_ptrs_desc\n");
+
+ desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
+ desc_if->free_ring_resources = xgbe_free_ring_resources;
+ desc_if->map_tx_skb = xgbe_map_tx_skb;
+ desc_if->realloc_skb = xgbe_realloc_skb;
+ desc_if->unmap_skb = xgbe_unmap_skb;
+ desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
+ desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;
+
+ DBGPR("<--xgbe_init_function_ptrs_desc\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
new file mode 100644
index 0000000..002293b
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -0,0 +1,2182 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/phy.h>
+#include <linux/clk.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
+ unsigned int usec)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ DBGPR("-->xgbe_usec_to_riwt\n");
+
+ rate = clk_get_rate(pdata->sysclock);
+
+ /*
+ * Convert the input usec value to the watchdog timer value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+	 *   ( usec * ( system_clock_hz / 10^6 ) ) / 256
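+	 *
+	 * e.g. with a hypothetical 125 MHz system clock, 100 usec gives
+	 * ( 100 * 125 ) / 256 = 48 (integer division truncates)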
+ */
+ ret = (usec * (rate / 1000000)) / 256;
+
+ DBGPR("<--xgbe_usec_to_riwt\n");
+
+ return ret;
+}
+
+static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
+ unsigned int riwt)
+{
+ unsigned long rate;
+ unsigned int ret;
+
+ DBGPR("-->xgbe_riwt_to_usec\n");
+
+ rate = clk_get_rate(pdata->sysclock);
+
+ /*
+ * Convert the input watchdog timer value to the usec value. Each
+ * watchdog timer value is equivalent to 256 clock cycles.
+ * Calculate the required value as:
+	 *   ( riwt * 256 ) / ( system_clock_hz / 10^6 )
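+	 *
+	 * e.g. with a hypothetical 125 MHz system clock, a riwt of 48
+	 * gives ( 48 * 256 ) / 125 = 98 usec (truncated)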
+ */
+ ret = (riwt * 256) / (rate / 1000000);
+
+ DBGPR("<--xgbe_riwt_to_usec\n");
+
+ return ret;
+}
+
+static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
+ pdata->pblx8);
+
+ return 0;
+}
+
+static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL);
+}
+
+static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL,
+ pdata->tx_pbl);
+ }
+
+ return 0;
+}
+
+static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL);
+}
+
+static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL,
+ pdata->rx_pbl);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP,
+ pdata->tx_osp_mode);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
+
+ return 0;
+}
+
+static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
+
+ return 0;
+}
+
+static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
+
+ return 0;
+}
+
+static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
+
+ return 0;
+}
+
+static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT,
+ pdata->rx_riwt);
+ }
+
+ return 0;
+}
+
+static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
+{
+ return 0;
+}
+
+static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ,
+ pdata->rx_buf_size);
+ }
+}
+
+static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1);
+ }
+}
+
+static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+
+ /* Clear MAC flow control */
+ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
+
+ /* Set MAC flow control */
+ max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = XGMAC_IOREAD(pdata, reg);
+
+ /* Enable transmit flow control */
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
+ /* Set pause time */
+ XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
+
+ return 0;
+}
+
+static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
+{
+ if (pdata->tx_pause)
+ xgbe_enable_tx_flow_control(pdata);
+ else
+ xgbe_disable_tx_flow_control(pdata);
+
+ return 0;
+}
+
+static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
+{
+ if (pdata->rx_pause)
+ xgbe_enable_rx_flow_control(pdata);
+ else
+ xgbe_disable_rx_flow_control(pdata);
+
+ return 0;
+}
+
+static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
+{
+ xgbe_config_tx_flow_control(pdata);
+ xgbe_config_rx_flow_control(pdata);
+}
+
+static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int dma_ch_isr, dma_ch_ier;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ /* Clear all the interrupts which are set */
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+
+ /* Clear all interrupt enable bits */
+ dma_ch_ier = 0;
+
+		/* Enable the following interrupts
+ * NIE - Normal Interrupt Summary Enable
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+
+ if (channel->tx_ring) {
+ /* Enable the following Tx interrupts
+ * TIE - Transmit Interrupt Enable (unless polling)
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+ }
+ if (channel->rx_ring) {
+			/* Enable the following Rx interrupts
+ * RBUE - Receive Buffer Unavailable Enable
+ * RIE - Receive Interrupt Enable
+ */
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
+ XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
+ }
+
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ }
+}
+
+static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
+{
+ unsigned int mtl_q_isr;
+ unsigned int q_count, i;
+
+ q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+ for (i = 0; i < q_count; i++) {
+ /* Clear all the interrupts which are set */
+ mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
+ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
+
+ /* No MTL interrupts to be enabled */
+ XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, 0);
+ }
+}
+
+static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
+{
+ /* No MAC interrupts to be enabled */
+ XGMAC_IOWRITE(pdata, MAC_IER, 0);
+
+ /* Enable all counter interrupts */
+ XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
+ XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xff);
+}
+
+static int xgbe_set_gmii_speed(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x3);
+
+ return 0;
+}
+
+static int xgbe_set_gmii_2500_speed(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0x2);
+
+ return 0;
+}
+
+static int xgbe_set_xgmii_speed(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, 0);
+
+ return 0;
+}
+
+static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
+ return 0;
+
+ DBGPR(" %s promiscuous mode\n", enable ? "entering" : "leaving");
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);
+
+ return 0;
+}
+
+static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
+ unsigned int enable)
+{
+ unsigned int val = enable ? 1 : 0;
+
+ if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
+ return 0;
+
+ DBGPR(" %s allmulti mode\n", enable ? "entering" : "leaving");
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);
+
+ return 0;
+}
+
+static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
+ unsigned int am_mode)
+{
+ struct netdev_hw_addr *ha;
+ unsigned int mac_reg;
+ unsigned int mac_addr_hi, mac_addr_lo;
+ u8 *mac_addr;
+ unsigned int i;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 0);
+
+ i = 0;
+ mac_reg = MAC_MACA1HR;
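+
+ /* Each additional MAC address occupies a high/low register pair:
+ * the high register holds address bytes 4-5 plus the AE (address
+ * enable) bit, the low register holds address bytes 0-3
+ */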
+
+ netdev_for_each_uc_addr(ha, pdata->netdev) {
+ mac_addr_lo = 0;
+ mac_addr_hi = 0;
+ mac_addr = (u8 *)&mac_addr_lo;
+ mac_addr[0] = ha->addr[0];
+ mac_addr[1] = ha->addr[1];
+ mac_addr[2] = ha->addr[2];
+ mac_addr[3] = ha->addr[3];
+ mac_addr = (u8 *)&mac_addr_hi;
+ mac_addr[0] = ha->addr[4];
+ mac_addr[1] = ha->addr[5];
+
+ DBGPR(" adding unicast address %pM at 0x%04x\n",
+ ha->addr, mac_reg);
+
+ XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
+
+ XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
+ mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
+ mac_reg += MAC_MACA_INC;
+
+ i++;
+ }
+
+ if (!am_mode) {
+ netdev_for_each_mc_addr(ha, pdata->netdev) {
+ mac_addr_lo = 0;
+ mac_addr_hi = 0;
+ mac_addr = (u8 *)&mac_addr_lo;
+ mac_addr[0] = ha->addr[0];
+ mac_addr[1] = ha->addr[1];
+ mac_addr[2] = ha->addr[2];
+ mac_addr[3] = ha->addr[3];
+ mac_addr = (u8 *)&mac_addr_hi;
+ mac_addr[0] = ha->addr[4];
+ mac_addr[1] = ha->addr[5];
+
+ DBGPR(" adding multicast address %pM at 0x%04x\n",
+ ha->addr, mac_reg);
+
+ XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
+
+ XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
+ mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
+ mac_reg += MAC_MACA_INC;
+
+ i++;
+ }
+ }
+
+ /* Clear remaining additional MAC address entries */
+ for (; i < pdata->hw_feat.addn_mac; i++) {
+ XGMAC_IOWRITE(pdata, mac_reg, 0);
+ mac_reg += MAC_MACA_INC;
+ XGMAC_IOWRITE(pdata, mac_reg, 0);
+ mac_reg += MAC_MACA_INC;
+ }
+
+ return 0;
+}
+
+static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, u8 *addr)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+
+ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0] << 0);
+
+ XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
+ XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
+
+ return 0;
+}
+
+static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg)
+{
+ unsigned int mmd_address;
+ int mmd_data;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying APB3
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires the PCS registers to be accessed in
+ * two phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 32-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 2 bits and reading 32 bits of data.
+ */
+ mutex_lock(&pdata->xpcs_mutex);
+ XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
+ mmd_data = XPCS_IOREAD(pdata, (mmd_address & 0xff) << 2);
+ mutex_unlock(&pdata->xpcs_mutex);
+
+ return mmd_data;
+}
+
+static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int mmd_address;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying APB3
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires the PCS registers to be accessed in
+ * two phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 32-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 2 bits and writing 32 bits of data.
+ */
+ mutex_lock(&pdata->xpcs_mutex);
+ XPCS_IOWRITE(pdata, PCS_MMD_SELECT << 2, mmd_address >> 8);
+ XPCS_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
+ mutex_unlock(&pdata->xpcs_mutex);
+}
+
+static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
+{
+ return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
+}
+
+static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
+
+ return 0;
+}
+
+static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ /* Put the VLAN tag in the Rx descriptor */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);
+
+ /* Don't check the VLAN type */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);
+
+ /* Check only C-TAG (0x8100) packets */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);
+
+ /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);
+
+ /* Enable VLAN tag stripping */
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);
+
+ return 0;
+}
+
+static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
+{
+ XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);
+
+ return 0;
+}
+
+static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
+{
+ struct xgbe_ring_desc *rdesc = rdata->rdesc;
+
+ /* Reset the Tx descriptor
+ * Set buffer 1 (lo) address to zero
+ * Set buffer 1 (hi) address to zero
+ * Reset all desc2 control bits (IC, TTSE, B2L & B1L)
+ * Reset all desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
+ */
+ rdesc->desc0 = 0;
+ rdesc->desc1 = 0;
+ rdesc->desc2 = 0;
+ rdesc->desc3 = 0;
+}
+
+static void xgbe_tx_desc_init(struct xgbe_channel *channel)
+{
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ int i;
+ int start_index = ring->cur;
+
+ DBGPR("-->tx_desc_init\n");
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = GET_DESC_DATA(ring, i);
+ rdesc = rdata->rdesc;
+
+ /* Initialize Tx descriptor
+ * Set buffer 1 (lo) address to zero
+ * Set buffer 1 (hi) address to zero
+ * Reset all desc2 control bits (IC, TTSE, B2L & B1L)
+ * Reset all desc3 control bits (OWN, CTXT, FD, LD, CPC, CIC,
+ * etc)
+ */
+ rdesc->desc0 = 0;
+ rdesc->desc1 = 0;
+ rdesc->desc2 = 0;
+ rdesc->desc3 = 0;
+ }
+
+ /* Make sure everything is written to the descriptor(s) before
+ * telling the device about them
+ */
+ wmb();
+
+ /* Update the total number of Tx descriptors */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
+
+ /* Update the starting address of descriptor ring */
+ rdata = GET_DESC_DATA(ring, start_index);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
+ upper_32_bits(rdata->rdesc_dma));
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ DBGPR("<--tx_desc_init\n");
+}
+
+static void xgbe_rx_desc_reset(struct xgbe_ring_data *rdata)
+{
+ struct xgbe_ring_desc *rdesc = rdata->rdesc;
+
+ /* Reset the Rx descriptor
+ * Set buffer 1 (lo) address to dma address (lo)
+ * Set buffer 1 (hi) address to dma address (hi)
+ * Set buffer 2 (lo) address to zero
+ * Set buffer 2 (hi) address to zero and set control bits
+ * OWN and INTE
+ */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+ rdesc->desc2 = 0;
+
+ rdesc->desc3 = 0;
+ if (rdata->interrupt)
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+
+ /* Since the Rx DMA engine is likely running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the descriptor
+ */
+ wmb();
+
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
+
+ /* Make sure ownership is written to the descriptor */
+ wmb();
+}
+
+static void xgbe_rx_desc_init(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ unsigned int start_index = ring->cur;
+ unsigned int rx_coalesce, rx_frames;
+ unsigned int i;
+
+ DBGPR("-->rx_desc_init\n");
+
+ rx_coalesce = (pdata->rx_riwt || pdata->rx_frames) ? 1 : 0;
+ rx_frames = pdata->rx_frames;
+
+ /* Initialize all descriptors */
+ for (i = 0; i < ring->rdesc_count; i++) {
+ rdata = GET_DESC_DATA(ring, i);
+ rdesc = rdata->rdesc;
+
+ /* Initialize Rx descriptor
+ * Set buffer 1 (lo) address to dma address (lo)
+ * Set buffer 1 (hi) address to dma address (hi)
+ * Set buffer 2 (lo) address to zero
+ * Set buffer 2 (hi) address to zero and set control
+ * bits OWN and INTE appropriately
+ */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+ rdesc->desc2 = 0;
+ rdesc->desc3 = 0;
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, 1);
+ rdata->interrupt = 1;
+ if (rx_coalesce && (!rx_frames || ((i + 1) % rx_frames))) {
+ /* Clear interrupt on completion bit */
+ XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE,
+ 0);
+ rdata->interrupt = 0;
+ }
+ }
+
+ /* Make sure everything is written to the descriptors before
+ * telling the device about them
+ */
+ wmb();
+
+ /* Update the total number of Rx descriptors */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
+
+ /* Update the starting address of descriptor ring */
+ rdata = GET_DESC_DATA(ring, start_index);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
+ upper_32_bits(rdata->rdesc_dma));
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Update the Rx Descriptor Tail Pointer */
+ rdata = GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ DBGPR("<--rx_desc_init\n");
+}
+
+static void xgbe_pre_xmit(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ unsigned int csum, tso, vlan;
+ unsigned int tso_context, vlan_context;
+ unsigned int tx_coalesce, tx_frames;
+ int start_index = ring->cur;
+ int i;
+
+ DBGPR("-->xgbe_pre_xmit\n");
+
+ csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE);
+ tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE);
+ vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG);
+
+ if (tso && (packet->mss != ring->tx.cur_mss))
+ tso_context = 1;
+ else
+ tso_context = 0;
+
+ if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
+ vlan_context = 1;
+ else
+ vlan_context = 0;
+
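+ /* Tx interrupt coalescing is in effect when either a usec or a
+ * frame threshold has been configured
+ */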
+ tx_coalesce = (pdata->tx_usecs || pdata->tx_frames) ? 1 : 0;
+ tx_frames = pdata->tx_frames;
+ if (tx_coalesce && !channel->tx_timer_active)
+ ring->coalesce_count = 0;
+
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+
+ /* Create a context descriptor if this is a TSO or VLAN packet */
+ if (tso_context || vlan_context) {
+ if (tso_context) {
+ DBGPR(" TSO context descriptor, mss=%u\n",
+ packet->mss);
+
+ /* Set the MSS size */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
+ MSS, packet->mss);
+
+ /* Mark it as a CONTEXT descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+
+ /* Indicate this descriptor contains the MSS */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ TCMSSV, 1);
+
+ ring->tx.cur_mss = packet->mss;
+ }
+
+ if (vlan_context) {
+ DBGPR(" VLAN context descriptor, ctag=%u\n",
+ packet->vlan_ctag);
+
+ /* Mark it as a CONTEXT descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ CTXT, 1);
+
+ /* Set the VLAN tag */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ VT, packet->vlan_ctag);
+
+ /* Indicate this descriptor contains the VLAN tag */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
+ VLTV, 1);
+
+ ring->tx.cur_vlan_ctag = packet->vlan_ctag;
+ }
+
+ ring->cur++;
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+ }
+
+ /* Update buffer address (for TSO this is the header) */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+
+ /* Update the buffer length */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ rdata->skb_dma_len);
+
+ /* VLAN tag insertion check */
+ if (vlan)
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
+ TX_NORMAL_DESC2_VLAN_INSERT);
+
+ /* Set IC bit based on Tx coalescing settings */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+ if (tx_coalesce && (!tx_frames ||
+ (++ring->coalesce_count % tx_frames)))
+ /* Clear IC bit */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
+
+ /* Mark it as First Descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);
+
+ /* Mark it as a NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /* Set OWN bit if not the first descriptor */
+ if (ring->cur != start_index)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ if (tso) {
+ /* Enable TSO */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
+ packet->tcp_payload_len);
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
+ packet->tcp_header_len / 4);
+ } else {
+ /* Enable CRC and Pad Insertion */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
+ CIC, 0x3);
+
+ /* Set the total length to be transmitted */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
+ packet->length);
+ }
+
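+ /* Chain descriptors for any remaining buffers of the packet */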
+ for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
+ ring->cur++;
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+
+ /* Update buffer address */
+ rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
+ rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
+
+ /* Update the buffer length */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ rdata->skb_dma_len);
+
+ /* Set IC bit based on Tx coalescing settings */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
+ if (tx_coalesce && (!tx_frames ||
+ (++ring->coalesce_count % tx_frames)))
+ /* Clear IC bit */
+ XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 0);
+
+ /* Set OWN bit */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+ /* Mark it as NORMAL descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+
+ /* Enable HW CSUM */
+ if (csum)
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
+ CIC, 0x3);
+ }
+
+ /* Set LAST bit for the last descriptor */
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
+
+ /* In case the Tx DMA engine is running, make sure everything
+ * is written to the descriptor(s) before setting the OWN bit
+ * for the first descriptor
+ */
+ wmb();
+
+ /* Set OWN bit for the first descriptor */
+ rdata = GET_DESC_DATA(ring, start_index);
+ rdesc = rdata->rdesc;
+ XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
+
+#ifdef XGMAC_ENABLE_TX_DESC_DUMP
+ xgbe_dump_tx_desc(ring, start_index, packet->rdesc_count, 1);
+#endif
+
+ /* Make sure ownership is written to the descriptor */
+ wmb();
+
+ /* Issue a poll command to Tx DMA by writing the address
+ * of the next free descriptor */
+ ring->cur++;
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+
+ /* Start the Tx coalescing timer */
+ if (tx_coalesce && !channel->tx_timer_active) {
+ channel->tx_timer_active = 1;
+ hrtimer_start(&channel->tx_timer,
+ ktime_set(0, pdata->tx_usecs * NSEC_PER_USEC),
+ HRTIMER_MODE_REL);
+ }
+
+ DBGPR(" %s: descriptors %u to %u written\n",
+ channel->name, start_index & (ring->rdesc_count - 1),
+ (ring->cur - 1) & (ring->rdesc_count - 1));
+
+ DBGPR("<--xgbe_pre_xmit\n");
+}
+
+static int xgbe_dev_read(struct xgbe_channel *channel)
+{
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct xgbe_packet_data *packet = &ring->packet_data;
+ unsigned int err, etlt;
+
+ DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
+
+ rdata = GET_DESC_DATA(ring, ring->cur);
+ rdesc = rdata->rdesc;
+
+ /* Check for data availability */
+ if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
+ return 1;
+
+#ifdef XGMAC_ENABLE_RX_DESC_DUMP
+ xgbe_dump_rx_desc(ring, rdesc, ring->cur);
+#endif
+
+ /* Get the packet length */
+ rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
+
+ if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
+ /* Not all the data has been transferred for this packet */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ INCOMPLETE, 1);
+ return 0;
+ }
+
+ /* This is the last of the data for this packet */
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ INCOMPLETE, 0);
+
+ /* Set checksum done indicator as appropriate */
+ if (channel->pdata->netdev->features & NETIF_F_RXCSUM)
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 1);
+
+ /* Check for errors (only valid in last descriptor) */
+ err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
+ etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
+ DBGPR(" err=%u, etlt=%#x\n", err, etlt);
+
+ if (!err || (err && !etlt)) {
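+ /* An ETLT value of 0x09 indicates a VLAN-tagged packet */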
+ if (etlt == 0x09) {
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ VLAN_CTAG, 1);
+ packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
+ RX_NORMAL_DESC0,
+ OVT);
+ DBGPR(" vlan-ctag=0x%04x\n", packet->vlan_ctag);
+ }
+ } else {
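+ /* ETLT values of 0x05/0x06 indicate a checksum error (the
+ * CSUM_DONE flag is cleared); anything else is reported as
+ * a frame error
+ */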
+ if ((etlt == 0x05) || (etlt == 0x06))
+ XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+ CSUM_DONE, 0);
+ else
+ XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
+ FRAME, 1);
+ }
+
+ DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
+ ring->cur & (ring->rdesc_count - 1), ring->cur);
+
+ return 0;
+}
+
+static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
+{
+ /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
+ return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
+}
+
+static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
+{
+ /* Rx and Tx share LD bit, so check TDES3.LD bit */
+ return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
+}
+
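+/* Save or restore the channel's DMA interrupt enables (masked by
+ * DMA_INTERRUPT_MASK) so that an XGMAC_INT_DMA_ALL enable request can
+ * restore exactly the interrupts an earlier disable request saved
+ */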
+static void xgbe_save_interrupt_status(struct xgbe_channel *channel,
+ enum xgbe_int_state int_state)
+{
+ unsigned int dma_ch_ier;
+
+ if (int_state == XGMAC_INT_STATE_SAVE) {
+ channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+ channel->saved_ier &= DMA_INTERRUPT_MASK;
+ } else {
+ dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+ dma_ch_ier |= channel->saved_ier;
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ }
+}
+
+static int xgbe_enable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+{
+ switch (int_id) {
+ case XGMAC_INT_DMA_ISR_DC0IS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1);
+ break;
+ case XGMAC_INT_DMA_ALL:
+ xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int xgbe_disable_int(struct xgbe_channel *channel,
+ enum xgbe_int int_id)
+{
+ unsigned int dma_ch_ier;
+
+ switch (int_id) {
+ case XGMAC_INT_DMA_ISR_DC0IS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TI:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TPS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_TBU:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RI:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RBU:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_RPS:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0);
+ break;
+ case XGMAC_INT_DMA_CH_SR_FBE:
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0);
+ break;
+ case XGMAC_INT_DMA_ALL:
+ xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE);
+
+ dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+ dma_ch_ier &= ~DMA_INTERRUPT_MASK;
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+ break;
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+static int xgbe_exit(struct xgbe_prv_data *pdata)
+{
+ unsigned int count = 2000;
+
+ DBGPR("-->xgbe_exit\n");
+
+ /* Issue a software reset */
+ XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
+ usleep_range(10, 15);
+
+ /* Poll until the software reset completes */
+ while (count-- && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+
+ DBGPR("<--xgbe_exit\n");
+
+ return 0;
+}
+
+static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
+{
+ unsigned int i, count;
+
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
+
+ /* Poll until the flush completes on each queue */
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) {
+ count = 2000;
+ while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
+ MTL_Q_TQOMR, FTQ))
+ usleep_range(500, 600);
+
+ if (!count)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
+{
+ /* Set enhanced addressing mode */
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+
+ /* Set the System Bus mode */
+ XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+}
+
+static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
+{
+ unsigned int arcache, awcache;
+
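+ /* Program the AXI read (arcache) and write (awcache) cache and
+ * domain attributes used for the various DMA access types
+ */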
+ arcache = 0;
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, DMA_ARCACHE_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, DMA_ARDOMAIN_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, DMA_ARCACHE_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, DMA_ARDOMAIN_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, DMA_ARCACHE_SETTING);
+ XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, DMA_ARDOMAIN_SETTING);
+ XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+
+ awcache = 0;
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, DMA_AWCACHE_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, DMA_AWDOMAIN_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, DMA_AWCACHE_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, DMA_AWDOMAIN_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, DMA_AWCACHE_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, DMA_AWDOMAIN_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, DMA_AWCACHE_SETTING);
+ XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, DMA_AWDOMAIN_SETTING);
+ XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+}
+
+static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ /* Set Tx to weighted round robin scheduling algorithm (when
+ * traffic class is using ETS algorithm)
+ */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
+
+ /* Set Tx traffic classes to strict priority algorithm */
+ for (i = 0; i < XGBE_TC_CNT; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP);
+
+ /* Set Rx to strict priority algorithm */
+ XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+}
+
+static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
+ unsigned char queue_count)
+{
+ unsigned int q_fifo_size = 0;
+ enum xgbe_mtl_fifo_size p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+
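+ /* The hardware fifo size feature value is a power-of-two encoding:
+ * the total size in bytes is 128 << fifo_size (see the cases below)
+ */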
+ /* Calculate Tx/Rx fifo share per queue */
+ switch (fifo_size) {
+ case 0:
+ q_fifo_size = FIFO_SIZE_B(128);
+ break;
+ case 1:
+ q_fifo_size = FIFO_SIZE_B(256);
+ break;
+ case 2:
+ q_fifo_size = FIFO_SIZE_B(512);
+ break;
+ case 3:
+ q_fifo_size = FIFO_SIZE_KB(1);
+ break;
+ case 4:
+ q_fifo_size = FIFO_SIZE_KB(2);
+ break;
+ case 5:
+ q_fifo_size = FIFO_SIZE_KB(4);
+ break;
+ case 6:
+ q_fifo_size = FIFO_SIZE_KB(8);
+ break;
+ case 7:
+ q_fifo_size = FIFO_SIZE_KB(16);
+ break;
+ case 8:
+ q_fifo_size = FIFO_SIZE_KB(32);
+ break;
+ case 9:
+ q_fifo_size = FIFO_SIZE_KB(64);
+ break;
+ case 10:
+ q_fifo_size = FIFO_SIZE_KB(128);
+ break;
+ case 11:
+ q_fifo_size = FIFO_SIZE_KB(256);
+ break;
+ }
+ q_fifo_size = q_fifo_size / queue_count;
+
+ /* Set the queue fifo size programmable value */
+ if (q_fifo_size >= FIFO_SIZE_KB(256))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(128))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(64))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(32))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(16))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(8))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(4))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(2))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
+ else if (q_fifo_size >= FIFO_SIZE_KB(1))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
+ else if (q_fifo_size >= FIFO_SIZE_B(512))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_512;
+ else if (q_fifo_size >= FIFO_SIZE_B(256))
+ p_fifo = XGMAC_MTL_FIFO_SIZE_256;
+
+ return p_fifo;
+}
+
+static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int i;
+
+ fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
+ pdata->hw_feat.tx_q_cnt);
+
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
+
+ netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
+ pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256));
+}
+
+static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
+{
+ enum xgbe_mtl_fifo_size fifo_size;
+ unsigned int i;
+
+ fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
+ pdata->hw_feat.rx_q_cnt);
+
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
+
+ netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
+ pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256));
+}
+
+static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata)
+{
+ unsigned int i, reg, reg_val;
+ unsigned int q_count = pdata->hw_feat.rx_q_cnt;
+
+ /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
+ reg = MTL_RQDCM0R;
+ reg_val = 0;
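+ /* Each register maps MTL_RQDCM_Q_PER_REG queues, one byte per
+ * queue; setting 0x80 in a queue's byte selects dynamic DMA
+ * channel mapping for that queue
+ */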
+ for (i = 0; i < q_count;) {
+ reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
+
+ if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count))
+ continue;
+
+ XGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MTL_RQDCM_INC;
+ reg_val = 0;
+ }
+}
+
+static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) {
+ /* Activate flow control when less than 4k left in fifo */
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+
+ /* De-activate flow control when more than 6k left in fifo */
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+ }
+}
+
+static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
+{
+ xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
+}
+
+static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
+{
+ unsigned int val;
+
+ val = (pdata->netdev->mtu > XGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+}
+
+static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
+{
+ if (pdata->netdev->features & NETIF_F_RXCSUM)
+ xgbe_enable_rx_csum(pdata);
+ else
+ xgbe_disable_rx_csum(pdata);
+}
+
+static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
+{
+ if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+ xgbe_enable_rx_vlan_stripping(pdata);
+ else
+ xgbe_disable_rx_vlan_stripping(pdata);
+}
+
+static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+ unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
+ stats->txoctetcount_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
+ stats->txframecount_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
+ stats->txbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
+ stats->txmulticastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
+ stats->tx64octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
+ stats->tx65to127octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
+ stats->tx128to255octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
+ stats->tx256to511octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
+ stats->tx512to1023octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
+ stats->tx1024tomaxoctets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
+ stats->txunicastframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
+ stats->txmulticastframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
+ stats->txbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
+ stats->txunderflowerror +=
+ XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
+ stats->txoctetcount_g +=
+ XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
+ stats->txframecount_g +=
+ XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
+ stats->txpauseframes +=
+ XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
+ stats->txvlanframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+}
+
+static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+ unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
+ stats->rxframecount_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
+ stats->rxoctetcount_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
+ stats->rxoctetcount_g +=
+ XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
+ stats->rxbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
+ stats->rxmulticastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
+ stats->rxcrcerror +=
+ XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
+ stats->rxrunterror +=
+ XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
+ stats->rxjabbererror +=
+ XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
+ stats->rxundersize_g +=
+ XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
+ stats->rxoversize_g +=
+ XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
+ stats->rx64octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
+ stats->rx65to127octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
+ stats->rx128to255octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
+ stats->rx256to511octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
+ stats->rx512to1023octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
+ stats->rx1024tomaxoctets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
+ stats->rxunicastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
+ stats->rxlengtherror +=
+ XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
+ stats->rxoutofrangetype +=
+ XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
+ stats->rxpauseframes +=
+ XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
+ stats->rxfifooverflow +=
+ XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
+ stats->rxvlanframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
+ stats->rxwatchdogerror +=
+ XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+}
+
+static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
+
+ /* Freeze counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
+
+ stats->txoctetcount_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_GB_LO);
+
+ stats->txframecount_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_G_LO);
+
+ stats->txmulticastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_G_LO);
+
+ stats->tx64octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX64OCTETS_GB_LO);
+
+ stats->tx65to127octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX65TO127OCTETS_GB_LO);
+
+ stats->tx128to255octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX128TO255OCTETS_GB_LO);
+
+ stats->tx256to511octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX256TO511OCTETS_GB_LO);
+
+ stats->tx512to1023octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX512TO1023OCTETS_GB_LO);
+
+ stats->tx1024tomaxoctets_gb +=
+ XGMAC_IOREAD(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
+
+ stats->txunicastframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXUNICASTFRAMES_GB_LO);
+
+ stats->txmulticastframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
+
+ stats->txbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
+
+ stats->txunderflowerror +=
+ XGMAC_IOREAD(pdata, MMC_TXUNDERFLOWERROR_LO);
+
+ stats->txoctetcount_g +=
+ XGMAC_IOREAD(pdata, MMC_TXOCTETCOUNT_G_LO);
+
+ stats->txframecount_g +=
+ XGMAC_IOREAD(pdata, MMC_TXFRAMECOUNT_G_LO);
+
+ stats->txpauseframes +=
+ XGMAC_IOREAD(pdata, MMC_TXPAUSEFRAMES_LO);
+
+ stats->txvlanframes_g +=
+ XGMAC_IOREAD(pdata, MMC_TXVLANFRAMES_G_LO);
+
+ stats->rxframecount_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXFRAMECOUNT_GB_LO);
+
+ stats->rxoctetcount_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_GB_LO);
+
+ stats->rxoctetcount_g +=
+ XGMAC_IOREAD(pdata, MMC_RXOCTETCOUNT_G_LO);
+
+ stats->rxbroadcastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXBROADCASTFRAMES_G_LO);
+
+ stats->rxmulticastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXMULTICASTFRAMES_G_LO);
+
+ stats->rxcrcerror +=
+ XGMAC_IOREAD(pdata, MMC_RXCRCERROR_LO);
+
+ stats->rxrunterror +=
+ XGMAC_IOREAD(pdata, MMC_RXRUNTERROR);
+
+ stats->rxjabbererror +=
+ XGMAC_IOREAD(pdata, MMC_RXJABBERERROR);
+
+ stats->rxundersize_g +=
+ XGMAC_IOREAD(pdata, MMC_RXUNDERSIZE_G);
+
+ stats->rxoversize_g +=
+ XGMAC_IOREAD(pdata, MMC_RXOVERSIZE_G);
+
+ stats->rx64octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX64OCTETS_GB_LO);
+
+ stats->rx65to127octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX65TO127OCTETS_GB_LO);
+
+ stats->rx128to255octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX128TO255OCTETS_GB_LO);
+
+ stats->rx256to511octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX256TO511OCTETS_GB_LO);
+
+ stats->rx512to1023octets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX512TO1023OCTETS_GB_LO);
+
+ stats->rx1024tomaxoctets_gb +=
+ XGMAC_IOREAD(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
+
+ stats->rxunicastframes_g +=
+ XGMAC_IOREAD(pdata, MMC_RXUNICASTFRAMES_G_LO);
+
+ stats->rxlengtherror +=
+ XGMAC_IOREAD(pdata, MMC_RXLENGTHERROR_LO);
+
+ stats->rxoutofrangetype +=
+ XGMAC_IOREAD(pdata, MMC_RXOUTOFRANGETYPE_LO);
+
+ stats->rxpauseframes +=
+ XGMAC_IOREAD(pdata, MMC_RXPAUSEFRAMES_LO);
+
+ stats->rxfifooverflow +=
+ XGMAC_IOREAD(pdata, MMC_RXFIFOOVERFLOW_LO);
+
+ stats->rxvlanframes_gb +=
+ XGMAC_IOREAD(pdata, MMC_RXVLANFRAMES_GB_LO);
+
+ stats->rxwatchdogerror +=
+ XGMAC_IOREAD(pdata, MMC_RXWATCHDOGERROR);
+
+ /* Un-freeze counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
+}
+
+static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
+{
+ /* Set counters to reset on read */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
+
+ /* Reset the counters */
+ XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
+}
+
+static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable each Tx queue */
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ MTL_Q_ENABLED);
+
+ /* Enable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+
+ /* Disable each Tx queue */
+ for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
+
+ /* Disable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ }
+}
+
+static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int reg_val, i;
+
+ /* Enable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ }
+
+ /* Enable each Rx queue */
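+ /* (each queue has a 2-bit enable field in MAC_RQC0R; a value of
+ * 0x2 enables the queue)
+ */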
+ reg_val = 0;
+ for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+ reg_val |= (0x02 << (i << 1));
+ XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
+
+ /* Enable MAC Rx */
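+ /* (DCRCC, CST and ACS control CRC checking and stripping of
+ * received frames; RE enables the receiver)
+ */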
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
+}
+
+static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable MAC Rx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
+ XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
+
+ /* Disable each Rx queue */
+ XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
+
+ /* Disable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ }
+}
+
+static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1);
+ }
+
+ /* Enable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable MAC Tx */
+ XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+
+ /* Disable each Tx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0);
+ }
+}
+
+static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Enable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1);
+ }
+}
+
+static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ /* Disable each Rx DMA channel */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->rx_ring)
+ break;
+
+ XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0);
+ }
+}
+
+static int xgbe_init(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ int ret;
+
+ DBGPR("-->xgbe_init\n");
+
+ /* Flush Tx queues */
+ ret = xgbe_flush_tx_queues(pdata);
+ if (ret)
+ return ret;
+
+ /*
+ * Initialize DMA related features
+ */
+ xgbe_config_dma_bus(pdata);
+ xgbe_config_dma_cache(pdata);
+ xgbe_config_osp_mode(pdata);
+ xgbe_config_pblx8(pdata);
+ xgbe_config_tx_pbl_val(pdata);
+ xgbe_config_rx_pbl_val(pdata);
+ xgbe_config_rx_coalesce(pdata);
+ xgbe_config_tx_coalesce(pdata);
+ xgbe_config_rx_buffer_size(pdata);
+ xgbe_config_tso_mode(pdata);
+ desc_if->wrapper_tx_desc_init(pdata);
+ desc_if->wrapper_rx_desc_init(pdata);
+ xgbe_enable_dma_interrupts(pdata);
+
+ /*
+ * Initialize MTL related features
+ */
+ xgbe_config_mtl_mode(pdata);
+ xgbe_config_rx_queue_mapping(pdata);
+ /* TODO: Program the priorities mapped to the Selected Traffic
+ * Classes in MTL_TC_Prty_Map0-3 registers
+ */
+ xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
+ xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
+ xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
+ xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
+ xgbe_config_tx_fifo_size(pdata);
+ xgbe_config_rx_fifo_size(pdata);
+ xgbe_config_flow_control_threshold(pdata);
+ /* TODO: Queue to Traffic Class Mapping (Q2TCMAP) */
+ /* TODO: Error Packet and undersized good Packet forwarding enable
+ * (FEP and FUP)
+ */
+ xgbe_enable_mtl_interrupts(pdata);
+
+ /* Transmit Class Weight */
+ XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10);
+
+ /*
+ * Initialize MAC related features
+ */
+ xgbe_config_mac_address(pdata);
+ xgbe_config_jumbo_enable(pdata);
+ xgbe_config_flow_control(pdata);
+ xgbe_config_checksum_offload(pdata);
+ xgbe_config_vlan_support(pdata);
+ xgbe_config_mmc(pdata);
+ xgbe_enable_mac_interrupts(pdata);
+
+ DBGPR("<--xgbe_init\n");
+
+ return 0;
+}
+
+void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
+{
+ DBGPR("-->xgbe_init_function_ptrs\n");
+
+ hw_if->tx_complete = xgbe_tx_complete;
+
+ hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
+ hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
+ hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
+ hw_if->set_mac_address = xgbe_set_mac_address;
+
+ hw_if->enable_rx_csum = xgbe_enable_rx_csum;
+ hw_if->disable_rx_csum = xgbe_disable_rx_csum;
+
+ hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
+ hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
+
+ hw_if->read_mmd_regs = xgbe_read_mmd_regs;
+ hw_if->write_mmd_regs = xgbe_write_mmd_regs;
+
+ hw_if->set_gmii_speed = xgbe_set_gmii_speed;
+ hw_if->set_gmii_2500_speed = xgbe_set_gmii_2500_speed;
+ hw_if->set_xgmii_speed = xgbe_set_xgmii_speed;
+
+ hw_if->enable_tx = xgbe_enable_tx;
+ hw_if->disable_tx = xgbe_disable_tx;
+ hw_if->enable_rx = xgbe_enable_rx;
+ hw_if->disable_rx = xgbe_disable_rx;
+
+ hw_if->powerup_tx = xgbe_powerup_tx;
+ hw_if->powerdown_tx = xgbe_powerdown_tx;
+ hw_if->powerup_rx = xgbe_powerup_rx;
+ hw_if->powerdown_rx = xgbe_powerdown_rx;
+
+ hw_if->pre_xmit = xgbe_pre_xmit;
+ hw_if->dev_read = xgbe_dev_read;
+ hw_if->enable_int = xgbe_enable_int;
+ hw_if->disable_int = xgbe_disable_int;
+ hw_if->init = xgbe_init;
+ hw_if->exit = xgbe_exit;
+
+ /* Descriptor related sequences have to be initialized here */
+ hw_if->tx_desc_init = xgbe_tx_desc_init;
+ hw_if->rx_desc_init = xgbe_rx_desc_init;
+ hw_if->tx_desc_reset = xgbe_tx_desc_reset;
+ hw_if->rx_desc_reset = xgbe_rx_desc_reset;
+ hw_if->is_last_desc = xgbe_is_last_desc;
+ hw_if->is_context_desc = xgbe_is_context_desc;
+
+ /* For flow control */
+ hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
+ hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
+
+ /* For Rx and Tx interrupt coalescing */
+ hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
+ hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
+ hw_if->usec_to_riwt = xgbe_usec_to_riwt;
+ hw_if->riwt_to_usec = xgbe_riwt_to_usec;
+
+ /* For RX and TX threshold config */
+ hw_if->config_rx_threshold = xgbe_config_rx_threshold;
+ hw_if->config_tx_threshold = xgbe_config_tx_threshold;
+
+ /* For RX and TX Store and Forward Mode config */
+ hw_if->config_rsf_mode = xgbe_config_rsf_mode;
+ hw_if->config_tsf_mode = xgbe_config_tsf_mode;
+
+ /* For TX DMA Operating on Second Frame config */
+ hw_if->config_osp_mode = xgbe_config_osp_mode;
+
+ /* For RX and TX PBL config */
+ hw_if->config_rx_pbl_val = xgbe_config_rx_pbl_val;
+ hw_if->get_rx_pbl_val = xgbe_get_rx_pbl_val;
+ hw_if->config_tx_pbl_val = xgbe_config_tx_pbl_val;
+ hw_if->get_tx_pbl_val = xgbe_get_tx_pbl_val;
+ hw_if->config_pblx8 = xgbe_config_pblx8;
+
+ /* For MMC statistics support */
+ hw_if->tx_mmc_int = xgbe_tx_mmc_int;
+ hw_if->rx_mmc_int = xgbe_rx_mmc_int;
+ hw_if->read_mmc_stats = xgbe_read_mmc_stats;
+
+ DBGPR("<--xgbe_init_function_ptrs\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
new file mode 100644
index 0000000..cfe3d93
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -0,0 +1,1351 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/tcp.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+#include <net/busy_poll.h>
+#include <linux/clk.h>
+#include <linux/if_ether.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static int xgbe_poll(struct napi_struct *, int);
+static void xgbe_set_rx_mode(struct net_device *);
+
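+/* Number of Tx descriptors available: the ring size less the
+ * descriptors currently in flight (cur - dirty)
+ */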
+static inline unsigned int xgbe_tx_avail_desc(struct xgbe_ring *ring)
+{
+ return (ring->rdesc_count - (ring->cur - ring->dirty));
+}
+
+static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
+{
+ unsigned int rx_buf_size;
+
+ if (mtu > XGMAC_JUMBO_PACKET_MTU) {
+ netdev_alert(netdev, "MTU exceeds maximum supported value\n");
+ return -EINVAL;
+ }
+
+ rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
+ if (rx_buf_size < RX_MIN_BUF_SIZE)
+ rx_buf_size = RX_MIN_BUF_SIZE;
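+ /* Round the buffer size up to the next RX_BUF_ALIGN boundary */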
+ rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1);
+
+ return rx_buf_size;
+}
+
+static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring)
+ hw_if->enable_int(channel,
+ XGMAC_INT_DMA_CH_SR_TI);
+ if (channel->rx_ring)
+ hw_if->enable_int(channel,
+ XGMAC_INT_DMA_CH_SR_RI);
+ }
+}
+
+static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (channel->tx_ring)
+ hw_if->disable_int(channel,
+ XGMAC_INT_DMA_CH_SR_TI);
+ if (channel->rx_ring)
+ hw_if->disable_int(channel,
+ XGMAC_INT_DMA_CH_SR_RI);
+ }
+}
+
+static irqreturn_t xgbe_isr(int irq, void *data)
+{
+ struct xgbe_prv_data *pdata = data;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_channel *channel;
+ unsigned int dma_isr, dma_ch_isr;
+ unsigned int mac_isr;
+ unsigned int i;
+
+ /* The DMA interrupt status register also reports MAC and MTL
+ * interrupts. So for polling mode, we just need to check for
+ * this register to be non-zero
+ */
+ dma_isr = XGMAC_IOREAD(pdata, DMA_ISR);
+ if (!dma_isr)
+ goto isr_done;
+
+ DBGPR("-->xgbe_isr\n");
+
+ DBGPR(" DMA_ISR = %08x\n", dma_isr);
+ DBGPR(" DMA_DS0 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR0));
+ DBGPR(" DMA_DS1 = %08x\n", XGMAC_IOREAD(pdata, DMA_DSR1));
+
+ for (i = 0; i < pdata->channel_count; i++) {
+ if (!(dma_isr & (1 << i)))
+ continue;
+
+ channel = pdata->channel + i;
+
+ dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
+ DBGPR(" DMA_CH%u_ISR = %08x\n", i, dma_ch_isr);
+
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, TI) ||
+ XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, RI)) {
+ if (napi_schedule_prep(&pdata->napi)) {
+ /* Disable Tx and Rx interrupts */
+ xgbe_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+ __napi_schedule(&pdata->napi);
+ }
+ }
+
+ /* Restart the device on a Fatal Bus Error */
+ if (XGMAC_GET_BITS(dma_ch_isr, DMA_CH_SR, FBE))
+ schedule_work(&pdata->restart_work);
+
+ /* Clear all interrupt signals */
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
+ }
+
+ if (XGMAC_GET_BITS(dma_isr, DMA_ISR, MACIS)) {
+ mac_isr = XGMAC_IOREAD(pdata, MAC_ISR);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCTXIS))
+ hw_if->tx_mmc_int(pdata);
+
+ if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
+ hw_if->rx_mmc_int(pdata);
+ }
+
+ DBGPR(" DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
+
+ DBGPR("<--xgbe_isr\n");
+
+isr_done:
+ return IRQ_HANDLED;
+}
+
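+/* Tx timer callback: schedule NAPI polling so that completed Tx
+ * descriptors are cleaned up by xgbe_tx_poll()
+ */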
+static enum hrtimer_restart xgbe_tx_timer(struct hrtimer *timer)
+{
+ struct xgbe_channel *channel = container_of(timer,
+ struct xgbe_channel,
+ tx_timer);
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_prv_data *pdata = channel->pdata;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_tx_timer\n");
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ if (napi_schedule_prep(&pdata->napi)) {
+ /* Disable Tx and Rx interrupts */
+ xgbe_disable_rx_tx_ints(pdata);
+
+ /* Turn on polling */
+ __napi_schedule(&pdata->napi);
+ }
+
+ channel->tx_timer_active = 0;
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ DBGPR("<--xgbe_tx_timer\n");
+
+ return HRTIMER_NORESTART;
+}
+
+static void xgbe_init_tx_timers(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_init_tx_timers\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ DBGPR(" %s adding tx timer\n", channel->name);
+ hrtimer_init(&channel->tx_timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_REL);
+ channel->tx_timer.function = xgbe_tx_timer;
+ }
+
+ DBGPR("<--xgbe_init_tx_timers\n");
+}
+
+static void xgbe_stop_tx_timers(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel;
+ unsigned int i;
+
+ DBGPR("-->xgbe_stop_tx_timers\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ if (!channel->tx_ring)
+ break;
+
+ DBGPR(" %s deleting tx timer\n", channel->name);
+ channel->tx_timer_active = 0;
+ hrtimer_cancel(&channel->tx_timer);
+ }
+
+ DBGPR("<--xgbe_stop_tx_timers\n");
+}
+
+void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
+{
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+ struct xgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+ DBGPR("-->xgbe_get_all_hw_features\n");
+
+ mac_hfr0 = XGMAC_IOREAD(pdata, MAC_HWF0R);
+ mac_hfr1 = XGMAC_IOREAD(pdata, MAC_HWF1R);
+ mac_hfr2 = XGMAC_IOREAD(pdata, MAC_HWF2R);
+
+ memset(hw_feat, 0, sizeof(*hw_feat));
+
+ /* Hardware feature register 0 */
+ hw_feat->gmii = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
+ hw_feat->vlhash = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
+ hw_feat->sma = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
+ hw_feat->rwk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
+ hw_feat->mgk = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
+ hw_feat->mmc = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
+ hw_feat->aoe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
+ hw_feat->ts = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
+ hw_feat->eee = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
+ hw_feat->tx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
+ hw_feat->rx_coe = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
+ hw_feat->addn_mac = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
+ ADDMACADRSEL);
+ hw_feat->ts_src = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
+ hw_feat->sa_vlan_ins = XGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ RXFIFOSIZE);
+ hw_feat->tx_fifo_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ TXFIFOSIZE);
+ hw_feat->dcb = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
+ hw_feat->sph = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
+ hw_feat->tso = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
+ hw_feat->dma_debug = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ HASHTBLSZ);
+ hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ L3L4FNUM);
+
+ /* Hardware feature register 2 */
+ hw_feat->rx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
+ hw_feat->tx_q_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
+ hw_feat->rx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
+ hw_feat->tx_ch_cnt = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
+ hw_feat->pps_out_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
+ hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
+
+ /* The Queue and Channel counts are zero based so increment them
+ * to get the actual number
+ */
+ hw_feat->rx_q_cnt++;
+ hw_feat->tx_q_cnt++;
+ hw_feat->rx_ch_cnt++;
+ hw_feat->tx_ch_cnt++;
+
+ DBGPR("<--xgbe_get_all_hw_features\n");
+}
+
+static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
+{
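+	/* Register the NAPI instance only on first use (add != 0); a
+	 * re-enable during power-up passes add == 0
+	 */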
+ if (add)
+ netif_napi_add(pdata->netdev, &pdata->napi, xgbe_poll,
+ NAPI_POLL_WEIGHT);
+ napi_enable(&pdata->napi);
+}
+
+static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
+{
+ napi_disable(&pdata->napi);
+}
+
+void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_init_tx_coalesce\n");
+
+ pdata->tx_usecs = XGMAC_INIT_DMA_TX_USECS;
+ pdata->tx_frames = XGMAC_INIT_DMA_TX_FRAMES;
+
+ hw_if->config_tx_coalesce(pdata);
+
+ DBGPR("<--xgbe_init_tx_coalesce\n");
+}
+
+void xgbe_init_rx_coalesce(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_init_rx_coalesce\n");
+
+ pdata->rx_riwt = hw_if->usec_to_riwt(pdata, XGMAC_INIT_DMA_RX_USECS);
+ pdata->rx_frames = XGMAC_INIT_DMA_RX_FRAMES;
+
+ hw_if->config_rx_coalesce(pdata);
+
+ DBGPR("<--xgbe_init_rx_coalesce\n");
+}
+
+static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_tx_skbuff\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->tx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = GET_DESC_DATA(ring, j);
+ desc_if->unmap_skb(pdata, rdata);
+ }
+ }
+
+ DBGPR("<--xgbe_free_tx_skbuff\n");
+}
+
+static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_ring_data *rdata;
+ unsigned int i, j;
+
+ DBGPR("-->xgbe_free_rx_skbuff\n");
+
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++) {
+ ring = channel->rx_ring;
+ if (!ring)
+ break;
+
+ for (j = 0; j < ring->rdesc_count; j++) {
+ rdata = GET_DESC_DATA(ring, j);
+ desc_if->unmap_skb(pdata, rdata);
+ }
+ }
+
+ DBGPR("<--xgbe_free_rx_skbuff\n");
+}
+
+int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_powerdown\n");
+
+ if (!netif_running(netdev) ||
+ (caller == XGMAC_IOCTL_CONTEXT && pdata->power_down)) {
+ netdev_alert(netdev, "Device is already powered down\n");
+ DBGPR("<--xgbe_powerdown\n");
+ return -EINVAL;
+ }
+
+ phy_stop(pdata->phydev);
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_detach(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+ xgbe_napi_disable(pdata);
+
+ /* Powerdown Tx/Rx */
+ hw_if->powerdown_tx(pdata);
+ hw_if->powerdown_rx(pdata);
+
+ pdata->power_down = 1;
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR("<--xgbe_powerdown\n");
+
+ return 0;
+}
+
+int xgbe_powerup(struct net_device *netdev, unsigned int caller)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned long flags;
+
+ DBGPR("-->xgbe_powerup\n");
+
+ if (!netif_running(netdev) ||
+ (caller == XGMAC_IOCTL_CONTEXT && !pdata->power_down)) {
+ netdev_alert(netdev, "Device is already powered up\n");
+ DBGPR("<--xgbe_powerup\n");
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ pdata->power_down = 0;
+
+ phy_start(pdata->phydev);
+
+	/* Power up Tx/Rx */
+ hw_if->powerup_tx(pdata);
+ hw_if->powerup_rx(pdata);
+
+ if (caller == XGMAC_DRIVER_CONTEXT)
+ netif_device_attach(netdev);
+
+ xgbe_napi_enable(pdata, 0);
+ netif_tx_start_all_queues(netdev);
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR("<--xgbe_powerup\n");
+
+ return 0;
+}
+
+static int xgbe_start(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct net_device *netdev = pdata->netdev;
+
+ DBGPR("-->xgbe_start\n");
+
+ xgbe_set_rx_mode(netdev);
+
+ hw_if->init(pdata);
+
+ phy_start(pdata->phydev);
+
+ hw_if->enable_tx(pdata);
+ hw_if->enable_rx(pdata);
+
+ xgbe_init_tx_timers(pdata);
+
+ xgbe_napi_enable(pdata, 1);
+ netif_tx_start_all_queues(netdev);
+
+ DBGPR("<--xgbe_start\n");
+
+ return 0;
+}
+
+static void xgbe_stop(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct net_device *netdev = pdata->netdev;
+
+ DBGPR("-->xgbe_stop\n");
+
+ phy_stop(pdata->phydev);
+
+ netif_tx_stop_all_queues(netdev);
+ xgbe_napi_disable(pdata);
+
+ xgbe_stop_tx_timers(pdata);
+
+ hw_if->disable_tx(pdata);
+ hw_if->disable_rx(pdata);
+
+ DBGPR("<--xgbe_stop\n");
+}
+
+static void xgbe_restart_dev(struct xgbe_prv_data *pdata, unsigned int reset)
+{
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+ DBGPR("-->xgbe_restart_dev\n");
+
+ /* If not running, "restart" will happen on open */
+ if (!netif_running(pdata->netdev))
+ return;
+
+ xgbe_stop(pdata);
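+	/* Make sure the interrupt handler has completed before freeing
+	 * the Tx/Rx buffers
+	 */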
+ synchronize_irq(pdata->irq_number);
+
+ xgbe_free_tx_skbuff(pdata);
+ xgbe_free_rx_skbuff(pdata);
+
+ /* Issue software reset to device if requested */
+ if (reset)
+ hw_if->exit(pdata);
+
+ xgbe_start(pdata);
+
+ DBGPR("<--xgbe_restart_dev\n");
+}
+
+static void xgbe_restart(struct work_struct *work)
+{
+ struct xgbe_prv_data *pdata = container_of(work,
+ struct xgbe_prv_data,
+ restart_work);
+
+ rtnl_lock();
+
+ xgbe_restart_dev(pdata, 1);
+
+ rtnl_unlock();
+}
+
+static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
+{
+ if (vlan_tx_tag_present(skb))
+ packet->vlan_ctag = vlan_tx_tag_get(skb);
+}
+
+static int xgbe_prep_tso(struct sk_buff *skb, struct xgbe_packet_data *packet)
+{
+ int ret;
+
+ if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE))
+ return 0;
+
+ ret = skb_cow_head(skb, 0);
+ if (ret)
+ return ret;
+
+ packet->header_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ packet->tcp_header_len = tcp_hdrlen(skb);
+ packet->tcp_payload_len = skb->len - packet->header_len;
+ packet->mss = skb_shinfo(skb)->gso_size;
+ DBGPR(" packet->header_len=%u\n", packet->header_len);
+ DBGPR(" packet->tcp_header_len=%u, packet->tcp_payload_len=%u\n",
+ packet->tcp_header_len, packet->tcp_payload_len);
+ DBGPR(" packet->mss=%u\n", packet->mss);
+
+ return 0;
+}
+
+static int xgbe_is_tso(struct sk_buff *skb)
+{
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
+ if (!skb_is_gso(skb))
+ return 0;
+
+ DBGPR(" TSO packet to be processed\n");
+
+ return 1;
+}
+
+static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
+ struct xgbe_packet_data *packet)
+{
+ struct skb_frag_struct *frag;
+ unsigned int context_desc;
+ unsigned int len;
+ unsigned int i;
+
+ context_desc = 0;
+ packet->rdesc_count = 0;
+
+ if (xgbe_is_tso(skb)) {
+		/* TSO requires an extra descriptor if mss is different */
+ if (skb_shinfo(skb)->gso_size != ring->tx.cur_mss) {
+ context_desc = 1;
+ packet->rdesc_count++;
+ }
+
+		/* TSO requires an extra descriptor for the TSO header */
+ packet->rdesc_count++;
+
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ TSO_ENABLE, 1);
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE, 1);
+ } else if (skb->ip_summed == CHECKSUM_PARTIAL)
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ CSUM_ENABLE, 1);
+
+ if (vlan_tx_tag_present(skb)) {
+ /* VLAN requires an extra descriptor if tag is different */
+ if (vlan_tx_tag_get(skb) != ring->tx.cur_vlan_ctag)
+ /* We can share with the TSO context descriptor */
+ if (!context_desc) {
+ context_desc = 1;
+ packet->rdesc_count++;
+ }
+
+ XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+ VLAN_CTAG, 1);
+ }
+
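+	/* One data descriptor is needed per TX_MAX_BUF_SIZE chunk of the
+	 * linear data and of each fragment
+	 */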
+ for (len = skb_headlen(skb); len;) {
+ packet->rdesc_count++;
+ len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+ }
+
+ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ frag = &skb_shinfo(skb)->frags[i];
+ for (len = skb_frag_size(frag); len; ) {
+ packet->rdesc_count++;
+ len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+ }
+ }
+}
+
+static int xgbe_open(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ int ret;
+
+ DBGPR("-->xgbe_open\n");
+
+ /* Enable the clock */
+ ret = clk_prepare_enable(pdata->sysclock);
+ if (ret) {
+ netdev_alert(netdev, "clk_prepare_enable failed\n");
+ return ret;
+ }
+
+ /* Calculate the Rx buffer size before allocating rings */
+ ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
+ if (ret < 0)
+ goto err_clk;
+ pdata->rx_buf_size = ret;
+
+ /* Allocate the ring descriptors and buffers */
+ ret = desc_if->alloc_ring_resources(pdata);
+ if (ret)
+ goto err_clk;
+
+ /* Initialize the device restart work struct */
+ INIT_WORK(&pdata->restart_work, xgbe_restart);
+
+ /* Request interrupts */
+ ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
+ netdev->name, pdata);
+ if (ret) {
+ netdev_alert(netdev, "error requesting irq %d\n",
+ pdata->irq_number);
+ goto err_irq;
+ }
+ pdata->irq_number = netdev->irq;
+
+ ret = xgbe_start(pdata);
+ if (ret)
+ goto err_start;
+
+ DBGPR("<--xgbe_open\n");
+
+ return 0;
+
+err_start:
+ hw_if->exit(pdata);
+
+ devm_free_irq(pdata->dev, pdata->irq_number, pdata);
+ pdata->irq_number = 0;
+
+err_irq:
+ desc_if->free_ring_resources(pdata);
+
+err_clk:
+ clk_disable_unprepare(pdata->sysclock);
+
+ return ret;
+}
+
+static int xgbe_close(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+
+ DBGPR("-->xgbe_close\n");
+
+ /* Stop the device */
+ xgbe_stop(pdata);
+
+ /* Issue software reset to device */
+ hw_if->exit(pdata);
+
+ /* Free all the ring data */
+ desc_if->free_ring_resources(pdata);
+
+ /* Release the interrupt */
+ if (pdata->irq_number != 0) {
+ devm_free_irq(pdata->dev, pdata->irq_number, pdata);
+ pdata->irq_number = 0;
+ }
+
+ /* Disable the clock */
+ clk_disable_unprepare(pdata->sysclock);
+
+ DBGPR("<--xgbe_close\n");
+
+ return 0;
+}
+
+static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_channel *channel;
+ struct xgbe_ring *ring;
+ struct xgbe_packet_data *packet;
+ unsigned long flags;
+ int ret;
+
+ DBGPR("-->xgbe_xmit: skb->len = %d\n", skb->len);
+
+ channel = pdata->channel + skb->queue_mapping;
+ ring = channel->tx_ring;
+ packet = &ring->packet_data;
+
+ ret = NETDEV_TX_OK;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ if (skb->len == 0) {
+ netdev_err(netdev, "empty skb received from stack\n");
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+
+ /* Calculate preliminary packet info */
+ memset(packet, 0, sizeof(*packet));
+ xgbe_packet_info(ring, skb, packet);
+
+ /* Check that there are enough descriptors available */
+ if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
+ DBGPR(" Tx queue stopped, not enough descriptors available\n");
+ netif_stop_subqueue(netdev, channel->queue_index);
+ ring->tx.queue_stopped = 1;
+ ret = NETDEV_TX_BUSY;
+ goto tx_netdev_return;
+ }
+
+ ret = xgbe_prep_tso(skb, packet);
+ if (ret) {
+ netdev_err(netdev, "error processing TSO packet\n");
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+ xgbe_prep_vlan(skb, packet);
+
+ if (!desc_if->map_tx_skb(channel, skb)) {
+ dev_kfree_skb_any(skb);
+ goto tx_netdev_return;
+ }
+
+ /* Configure required descriptor fields for transmission */
+ hw_if->pre_xmit(channel);
+
+#ifdef XGMAC_ENABLE_TX_PKT_DUMP
+ xgbe_print_pkt(netdev, skb, true);
+#endif
+
+tx_netdev_return:
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ DBGPR("<--xgbe_xmit\n");
+
+ return ret;
+}
+
+static void xgbe_set_rx_mode(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int pr_mode, am_mode;
+
+ DBGPR("-->xgbe_set_rx_mode\n");
+
+ pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
+ am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
+
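+	/* Force promiscuous or all-multicast mode when there are more
+	 * addresses than additional MAC address registers available
+	 */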
+ if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac)
+ pr_mode = 1;
+ if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac)
+ am_mode = 1;
+ if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) >
+ pdata->hw_feat.addn_mac)
+ pr_mode = 1;
+
+ hw_if->set_promiscuous_mode(pdata, pr_mode);
+ hw_if->set_all_multicast_mode(pdata, am_mode);
+ if (!pr_mode)
+ hw_if->set_addn_mac_addrs(pdata, am_mode);
+
+ DBGPR("<--xgbe_set_rx_mode\n");
+}
+
+static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct sockaddr *saddr = addr;
+
+ DBGPR("-->xgbe_set_mac_address\n");
+
+ if (!is_valid_ether_addr(saddr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(netdev->dev_addr, saddr->sa_data, netdev->addr_len);
+
+ hw_if->set_mac_address(pdata, netdev->dev_addr);
+
+ DBGPR("<--xgbe_set_mac_address\n");
+
+ return 0;
+}
+
+static int xgbe_change_mtu(struct net_device *netdev, int mtu)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ DBGPR("-->xgbe_change_mtu\n");
+
+ ret = xgbe_calc_rx_buf_size(netdev, mtu);
+ if (ret < 0)
+ return ret;
+
+ pdata->rx_buf_size = ret;
+ netdev->mtu = mtu;
+
+ xgbe_restart_dev(pdata, 0);
+
+ DBGPR("<--xgbe_change_mtu\n");
+
+ return 0;
+}
+
+static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
+ struct rtnl_link_stats64 *s)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_mmc_stats *pstats = &pdata->mmc_stats;
+
+ DBGPR("-->%s\n", __func__);
+
+ pdata->hw_if.read_mmc_stats(pdata);
+
+ s->rx_packets = pstats->rxframecount_gb;
+ s->rx_bytes = pstats->rxoctetcount_gb;
+ s->rx_errors = pstats->rxframecount_gb -
+ pstats->rxbroadcastframes_g -
+ pstats->rxmulticastframes_g -
+ pstats->rxunicastframes_g;
+ s->multicast = pstats->rxmulticastframes_g;
+ s->rx_length_errors = pstats->rxlengtherror;
+ s->rx_crc_errors = pstats->rxcrcerror;
+ s->rx_fifo_errors = pstats->rxfifooverflow;
+
+ s->tx_packets = pstats->txframecount_gb;
+ s->tx_bytes = pstats->txoctetcount_gb;
+ s->tx_errors = pstats->txframecount_gb - pstats->txframecount_g;
+ s->tx_dropped = netdev->stats.tx_dropped;
+
+ DBGPR("<--%s\n", __func__);
+
+ return s;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void xgbe_poll_controller(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_poll_controller\n");
+
+ disable_irq(pdata->irq_number);
+
+ xgbe_isr(pdata->irq_number, pdata);
+
+ enable_irq(pdata->irq_number);
+
+ DBGPR("<--xgbe_poll_controller\n");
+}
+#endif /* End CONFIG_NET_POLL_CONTROLLER */
+
+static int xgbe_set_features(struct net_device *netdev,
+ netdev_features_t features)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int rxcsum_enabled, rxvlan_enabled;
+
+ rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
+ rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX);
+
+ if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) {
+ hw_if->enable_rx_csum(pdata);
+ netdev_alert(netdev, "state change - rxcsum enabled\n");
+ } else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) {
+ hw_if->disable_rx_csum(pdata);
+ netdev_alert(netdev, "state change - rxcsum disabled\n");
+ }
+
+ if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) {
+ hw_if->enable_rx_vlan_stripping(pdata);
+ netdev_alert(netdev, "state change - rxvlan enabled\n");
+ } else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) {
+ hw_if->disable_rx_vlan_stripping(pdata);
+ netdev_alert(netdev, "state change - rxvlan disabled\n");
+ }
+
+ pdata->netdev_features = features;
+
+ DBGPR("<--xgbe_set_features\n");
+
+ return 0;
+}
+
+static const struct net_device_ops xgbe_netdev_ops = {
+ .ndo_open = xgbe_open,
+ .ndo_stop = xgbe_close,
+ .ndo_start_xmit = xgbe_xmit,
+ .ndo_set_rx_mode = xgbe_set_rx_mode,
+ .ndo_set_mac_address = xgbe_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = xgbe_change_mtu,
+ .ndo_get_stats64 = xgbe_get_stats64,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = xgbe_poll_controller,
+#endif
+ .ndo_set_features = xgbe_set_features,
+};
+
+struct net_device_ops *xgbe_get_netdev_ops(void)
+{
+ return (struct net_device_ops *)&xgbe_netdev_ops;
+}
+
+static int xgbe_tx_poll(struct xgbe_channel *channel)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->tx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+ struct net_device *netdev = pdata->netdev;
+ unsigned long flags;
+ int processed = 0;
+
+ DBGPR("-->xgbe_tx_poll\n");
+
+ /* Nothing to do if there isn't a Tx ring for this channel */
+ if (!ring)
+ return 0;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) {
+ rdata = GET_DESC_DATA(ring, ring->dirty);
+ rdesc = rdata->rdesc;
+
+ if (!hw_if->tx_complete(rdesc))
+ break;
+
+#ifdef XGMAC_ENABLE_TX_DESC_DUMP
+ xgbe_dump_tx_desc(ring, ring->dirty, 1, 0);
+#endif
+
+ /* Free the SKB and reset the descriptor for re-use */
+ desc_if->unmap_skb(pdata, rdata);
+ hw_if->tx_desc_reset(rdata);
+
+ processed++;
+ ring->dirty++;
+ }
+
+ if ((ring->tx.queue_stopped == 1) &&
+ (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) {
+ ring->tx.queue_stopped = 0;
+ netif_wake_subqueue(netdev, channel->queue_index);
+ }
+
+ DBGPR("<--xgbe_tx_poll: processed=%d\n", processed);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return processed;
+}
+
+static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
+{
+ struct xgbe_prv_data *pdata = channel->pdata;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct xgbe_desc_if *desc_if = &pdata->desc_if;
+ struct xgbe_ring *ring = channel->rx_ring;
+ struct xgbe_ring_data *rdata;
+ struct xgbe_packet_data *packet;
+ struct net_device *netdev = pdata->netdev;
+ struct sk_buff *skb;
+ unsigned int incomplete, error;
+ unsigned int cur_len, put_len, max_len;
+ int received = 0;
+
+ DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
+
+ /* Nothing to do if there isn't a Rx ring for this channel */
+ if (!ring)
+ return 0;
+
+ packet = &ring->packet_data;
+ while (received < budget) {
+ DBGPR(" cur = %d\n", ring->cur);
+
+ /* Clear the packet data information */
+ memset(packet, 0, sizeof(*packet));
+ skb = NULL;
+ error = 0;
+ cur_len = 0;
+
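+		/* A packet can span multiple descriptors; keep reading
+		 * until the INCOMPLETE attribute is cleared
+		 */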
+read_again:
+ rdata = GET_DESC_DATA(ring, ring->cur);
+
+ if (hw_if->dev_read(channel))
+ break;
+
+ received++;
+ ring->cur++;
+ ring->dirty++;
+
+ dma_unmap_single(pdata->dev, rdata->skb_dma,
+ rdata->skb_dma_len, DMA_FROM_DEVICE);
+ rdata->skb_dma = 0;
+
+ incomplete = XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES,
+ INCOMPLETE);
+
+ /* Earlier error, just drain the remaining data */
+ if (incomplete && error)
+ goto read_again;
+
+ if (error || packet->errors) {
+ if (packet->errors)
+ DBGPR("Error in received packet\n");
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+ put_len = rdata->len - cur_len;
+ if (skb) {
+ if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) {
+ DBGPR("pskb_expand_head error\n");
+ if (incomplete) {
+ error = 1;
+ goto read_again;
+ }
+
+ dev_kfree_skb(skb);
+ continue;
+ }
+ memcpy(skb_tail_pointer(skb), rdata->skb->data,
+ put_len);
+ } else {
+ skb = rdata->skb;
+ rdata->skb = NULL;
+ }
+ skb_put(skb, put_len);
+ cur_len += put_len;
+
+ if (incomplete)
+ goto read_again;
+
+ /* Be sure we don't exceed the configured MTU */
+ max_len = netdev->mtu + ETH_HLEN;
+ if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (skb->protocol == htons(ETH_P_8021Q)))
+ max_len += VLAN_HLEN;
+
+ if (skb->len > max_len) {
+ DBGPR("packet length exceeds configured MTU\n");
+ dev_kfree_skb(skb);
+ continue;
+ }
+
+#ifdef XGMAC_ENABLE_RX_PKT_DUMP
+ xgbe_print_pkt(netdev, skb, false);
+#endif
+
+ skb_checksum_none_assert(skb);
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, CSUM_DONE))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ if (XGMAC_GET_BITS(packet->attributes,
+ RX_PACKET_ATTRIBUTES, VLAN_CTAG))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ packet->vlan_ctag);
+
+ skb->dev = netdev;
+ skb->protocol = eth_type_trans(skb, netdev);
+ skb_record_rx_queue(skb, channel->queue_index);
+ skb_mark_napi_id(skb, &pdata->napi);
+
+ netdev->last_rx = jiffies;
+ napi_gro_receive(&pdata->napi, skb);
+ }
+
+ if (received) {
+ desc_if->realloc_skb(channel);
+
+		/* Update the Rx Tail Pointer Register with address of
+		 * the last cleaned entry
+		 */
+ rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
+ XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+ lower_32_bits(rdata->rdesc_dma));
+ }
+
+ DBGPR("<--xgbe_rx_poll: received = %d\n", received);
+
+ return received;
+}
+
+static int xgbe_poll(struct napi_struct *napi, int budget)
+{
+ struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
+ napi);
+ struct xgbe_channel *channel;
+ int processed;
+ unsigned int i;
+
+ DBGPR("-->xgbe_poll: budget=%d\n", budget);
+
+ /* Cleanup Tx ring first */
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ xgbe_tx_poll(channel);
+
+ /* Process Rx ring next */
+ processed = 0;
+ channel = pdata->channel;
+ for (i = 0; i < pdata->channel_count; i++, channel++)
+ processed += xgbe_rx_poll(channel, budget - processed);
+
+ /* If we processed everything, we are done */
+ if (processed < budget) {
+ /* Turn off polling */
+ napi_complete(napi);
+
+ /* Enable Tx and Rx interrupts */
+ xgbe_enable_rx_tx_ints(pdata);
+ }
+
+ DBGPR("<--xgbe_poll: received = %d\n", processed);
+
+ return processed;
+}
+
+void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
+ unsigned int count, unsigned int flag)
+{
+ struct xgbe_ring_data *rdata;
+ struct xgbe_ring_desc *rdesc;
+
+ while (count--) {
+ rdata = GET_DESC_DATA(ring, idx);
+ rdesc = rdata->rdesc;
+ DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
+ (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
+ le32_to_cpu(rdesc->desc0), le32_to_cpu(rdesc->desc1),
+ le32_to_cpu(rdesc->desc2), le32_to_cpu(rdesc->desc3));
+ idx++;
+ }
+}
+
+void xgbe_dump_rx_desc(struct xgbe_ring *ring, struct xgbe_ring_desc *desc,
+ unsigned int idx)
+{
+ DBGPR("RX_NORMAL_DESC[%d RX BY DEVICE] = %08x:%08x:%08x:%08x\n", idx,
+ le32_to_cpu(desc->desc0), le32_to_cpu(desc->desc1),
+ le32_to_cpu(desc->desc2), le32_to_cpu(desc->desc3));
+}
+
+void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx)
+{
+ struct ethhdr *eth = (struct ethhdr *)skb->data;
+ unsigned char *buf = skb->data;
+ unsigned char buffer[128];
+ unsigned int i, j;
+
+ netdev_alert(netdev, "\n************** SKB dump ****************\n");
+
+ netdev_alert(netdev, "%s packet of %d bytes\n",
+ (tx_rx ? "TX" : "RX"), skb->len);
+
+ netdev_alert(netdev, "Dst MAC addr: %pM\n", eth->h_dest);
+ netdev_alert(netdev, "Src MAC addr: %pM\n", eth->h_source);
+ netdev_alert(netdev, "Protocol: 0x%04hx\n", ntohs(eth->h_proto));
+
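+	/* Dump 32 bytes per line, with a space every 4 bytes and a
+	 * wider gap every 16 bytes
+	 */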
+ for (i = 0, j = 0; i < skb->len;) {
+ j += snprintf(buffer + j, sizeof(buffer) - j, "%02hhx",
+ buf[i++]);
+
+ if ((i % 32) == 0) {
+ netdev_alert(netdev, " 0x%04x: %s\n", i - 32, buffer);
+ j = 0;
+ } else if ((i % 16) == 0) {
+ buffer[j++] = ' ';
+ buffer[j++] = ' ';
+ } else if ((i % 4) == 0) {
+ buffer[j++] = ' ';
+ }
+ }
+ if (i % 32)
+ netdev_alert(netdev, " 0x%04x: %s\n", i - (i % 32), buffer);
+
+ netdev_alert(netdev, "\n************** SKB dump ****************\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
new file mode 100644
index 0000000..8909f2b
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
@@ -0,0 +1,510 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/phy.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+struct xgbe_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_size;
+ int stat_offset;
+};
+
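+/* Expand to a stat table entry (name, size, offset) for an MMC counter */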
+#define XGMAC_MMC_STAT(_string, _var) \
+ { _string, \
+ FIELD_SIZEOF(struct xgbe_mmc_stats, _var), \
+ offsetof(struct xgbe_prv_data, mmc_stats._var), \
+ }
+
+static const struct xgbe_stats xgbe_gstring_stats[] = {
+ XGMAC_MMC_STAT("tx_bytes", txoctetcount_gb),
+ XGMAC_MMC_STAT("tx_packets", txframecount_gb),
+ XGMAC_MMC_STAT("tx_unicast_packets", txunicastframes_gb),
+ XGMAC_MMC_STAT("tx_broadcast_packets", txbroadcastframes_gb),
+ XGMAC_MMC_STAT("tx_multicast_packets", txmulticastframes_gb),
+ XGMAC_MMC_STAT("tx_vlan_packets", txvlanframes_g),
+ XGMAC_MMC_STAT("tx_64_byte_packets", tx64octets_gb),
+ XGMAC_MMC_STAT("tx_65_to_127_byte_packets", tx65to127octets_gb),
+ XGMAC_MMC_STAT("tx_128_to_255_byte_packets", tx128to255octets_gb),
+ XGMAC_MMC_STAT("tx_256_to_511_byte_packets", tx256to511octets_gb),
+ XGMAC_MMC_STAT("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
+ XGMAC_MMC_STAT("tx_1024_to_max_byte_packets", tx1024tomaxoctets_gb),
+ XGMAC_MMC_STAT("tx_underflow_errors", txunderflowerror),
+ XGMAC_MMC_STAT("tx_pause_frames", txpauseframes),
+
+ XGMAC_MMC_STAT("rx_bytes", rxoctetcount_gb),
+ XGMAC_MMC_STAT("rx_packets", rxframecount_gb),
+ XGMAC_MMC_STAT("rx_unicast_packets", rxunicastframes_g),
+ XGMAC_MMC_STAT("rx_broadcast_packets", rxbroadcastframes_g),
+ XGMAC_MMC_STAT("rx_multicast_packets", rxmulticastframes_g),
+ XGMAC_MMC_STAT("rx_vlan_packets", rxvlanframes_gb),
+ XGMAC_MMC_STAT("rx_64_byte_packets", rx64octets_gb),
+ XGMAC_MMC_STAT("rx_65_to_127_byte_packets", rx65to127octets_gb),
+ XGMAC_MMC_STAT("rx_128_to_255_byte_packets", rx128to255octets_gb),
+ XGMAC_MMC_STAT("rx_256_to_511_byte_packets", rx256to511octets_gb),
+ XGMAC_MMC_STAT("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
+ XGMAC_MMC_STAT("rx_1024_to_max_byte_packets", rx1024tomaxoctets_gb),
+ XGMAC_MMC_STAT("rx_undersize_packets", rxundersize_g),
+ XGMAC_MMC_STAT("rx_oversize_packets", rxoversize_g),
+ XGMAC_MMC_STAT("rx_crc_errors", rxcrcerror),
+ XGMAC_MMC_STAT("rx_crc_errors_small_packets", rxrunterror),
+ XGMAC_MMC_STAT("rx_crc_errors_giant_packets", rxjabbererror),
+ XGMAC_MMC_STAT("rx_length_errors", rxlengtherror),
+ XGMAC_MMC_STAT("rx_out_of_range_errors", rxoutofrangetype),
+ XGMAC_MMC_STAT("rx_fifo_overflow_errors", rxfifooverflow),
+ XGMAC_MMC_STAT("rx_watchdog_errors", rxwatchdogerror),
+ XGMAC_MMC_STAT("rx_pause_frames", rxpauseframes),
+};
+#define XGBE_STATS_COUNT ARRAY_SIZE(xgbe_gstring_stats)
+
+static void xgbe_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
+{
+ int i;
+
+ DBGPR("-->%s\n", __func__);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < XGBE_STATS_COUNT; i++) {
+ memcpy(data, xgbe_gstring_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ data += ETH_GSTRING_LEN;
+ }
+ break;
+ }
+
+ DBGPR("<--%s\n", __func__);
+}
+
+static void xgbe_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ u8 *stat;
+ int i;
+
+ DBGPR("-->%s\n", __func__);
+
+ pdata->hw_if.read_mmc_stats(pdata);
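+	/* Each statistic is stored as a u64 at stat_offset bytes into
+	 * the private data structure
+	 */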
+ for (i = 0; i < XGBE_STATS_COUNT; i++) {
+ stat = (u8 *)pdata + xgbe_gstring_stats[i].stat_offset;
+ *data++ = *(u64 *)stat;
+ }
+
+ DBGPR("<--%s\n", __func__);
+}
+
+static int xgbe_get_sset_count(struct net_device *netdev, int stringset)
+{
+ int ret;
+
+ DBGPR("-->%s\n", __func__);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ ret = XGBE_STATS_COUNT;
+ break;
+
+ default:
+ ret = -EOPNOTSUPP;
+ }
+
+ DBGPR("<--%s\n", __func__);
+
+ return ret;
+}
+
+static void xgbe_get_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_get_pauseparam\n");
+
+ pause->autoneg = pdata->pause_autoneg;
+ pause->tx_pause = pdata->tx_pause;
+ pause->rx_pause = pdata->rx_pause;
+
+ DBGPR("<--xgbe_get_pauseparam\n");
+}
+
+static int xgbe_set_pauseparam(struct net_device *netdev,
+ struct ethtool_pauseparam *pause)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct phy_device *phydev = pdata->phydev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_set_pauseparam\n");
+
+ DBGPR(" autoneg = %d, tx_pause = %d, rx_pause = %d\n",
+ pause->autoneg, pause->tx_pause, pause->rx_pause);
+
+ pdata->pause_autoneg = pause->autoneg;
+ if (pause->autoneg) {
+ phydev->advertising |= ADVERTISED_Pause;
+ phydev->advertising |= ADVERTISED_Asym_Pause;
+
+ } else {
+ phydev->advertising &= ~ADVERTISED_Pause;
+ phydev->advertising &= ~ADVERTISED_Asym_Pause;
+
+ pdata->tx_pause = pause->tx_pause;
+ pdata->rx_pause = pause->rx_pause;
+ }
+
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phydev);
+
+ DBGPR("<--xgbe_set_pauseparam\n");
+
+ return ret;
+}
+
+static int xgbe_get_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ int ret;
+
+ DBGPR("-->xgbe_get_settings\n");
+
+ if (!pdata->phydev)
+ return -ENODEV;
+
+ spin_lock_irq(&pdata->lock);
+
+ ret = phy_ethtool_gset(pdata->phydev, cmd);
+ cmd->transceiver = XCVR_EXTERNAL;
+
+ spin_unlock_irq(&pdata->lock);
+
+ DBGPR("<--xgbe_get_settings\n");
+
+ return ret;
+}
+
+static int xgbe_set_settings(struct net_device *netdev,
+ struct ethtool_cmd *cmd)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct phy_device *phydev = pdata->phydev;
+ u32 speed;
+ int ret;
+
+ DBGPR("-->xgbe_set_settings\n");
+
+ if (!pdata->phydev)
+ return -ENODEV;
+
+ spin_lock_irq(&pdata->lock);
+
+ speed = ethtool_cmd_speed(cmd);
+
+ ret = -EINVAL;
+ if (cmd->phy_address != phydev->addr)
+ goto unlock;
+
+ if ((cmd->autoneg != AUTONEG_ENABLE) &&
+ (cmd->autoneg != AUTONEG_DISABLE))
+ goto unlock;
+
+ if ((cmd->autoneg == AUTONEG_DISABLE) &&
+ (((speed != SPEED_10000) && (speed != SPEED_1000)) ||
+ (cmd->duplex != DUPLEX_FULL)))
+ goto unlock;
+
+ if (cmd->autoneg == AUTONEG_ENABLE) {
+ /* Clear settings needed to force speeds */
+ phydev->supported &= ~SUPPORTED_1000baseT_Full;
+ phydev->supported &= ~SUPPORTED_10000baseT_Full;
+ } else {
+ /* Add settings needed to force speed */
+ phydev->supported |= SUPPORTED_1000baseT_Full;
+ phydev->supported |= SUPPORTED_10000baseT_Full;
+ }
+
+ cmd->advertising &= phydev->supported;
+ if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
+ goto unlock;
+
+ ret = 0;
+ phydev->autoneg = cmd->autoneg;
+ phydev->speed = speed;
+ phydev->duplex = cmd->duplex;
+ phydev->advertising = cmd->advertising;
+
+ if (cmd->autoneg == AUTONEG_ENABLE)
+ phydev->advertising |= ADVERTISED_Autoneg;
+ else
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+
+ if (netif_running(netdev))
+ ret = phy_start_aneg(phydev);
+
+unlock:
+ spin_unlock_irq(&pdata->lock);
+
+ DBGPR("<--xgbe_set_settings\n");
+
+ return ret;
+}
+
+static void xgbe_get_drvinfo(struct net_device *netdev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ strlcpy(drvinfo->driver, XGBE_DRV_NAME, sizeof(drvinfo->driver));
+ strlcpy(drvinfo->version, XGBE_DRV_VERSION, sizeof(drvinfo->version));
+ strlcpy(drvinfo->bus_info, dev_name(pdata->dev),
+ sizeof(drvinfo->bus_info));
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%d.%d.%d",
+ XGMAC_IOREAD_BITS(pdata, MAC_VR, USERVER),
+ XGMAC_IOREAD_BITS(pdata, MAC_VR, DEVID),
+ XGMAC_IOREAD_BITS(pdata, MAC_VR, SNPSVER));
+ drvinfo->n_stats = XGBE_STATS_COUNT;
+}
+
+static int xgbe_get_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int riwt;
+
+ DBGPR("-->xgbe_get_coalesce\n");
+
+ memset(ec, 0, sizeof(struct ethtool_coalesce));
+
+ riwt = pdata->rx_riwt;
+ ec->rx_coalesce_usecs = hw_if->riwt_to_usec(pdata, riwt);
+ ec->rx_max_coalesced_frames = pdata->rx_frames;
+
+ ec->tx_coalesce_usecs = pdata->tx_usecs;
+ ec->tx_max_coalesced_frames = pdata->tx_frames;
+
+ DBGPR("<--xgbe_get_coalesce\n");
+
+ return 0;
+}
+
+static int xgbe_set_coalesce(struct net_device *netdev,
+ struct ethtool_coalesce *ec)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ unsigned int rx_frames, rx_riwt, rx_usecs;
+ unsigned int tx_frames, tx_usecs;
+
+ DBGPR("-->xgbe_set_coalesce\n");
+
+ /* Check for not supported parameters */
+ if ((ec->rx_coalesce_usecs_irq) ||
+ (ec->rx_max_coalesced_frames_irq) ||
+ (ec->tx_coalesce_usecs_irq) ||
+ (ec->tx_max_coalesced_frames_irq) ||
+ (ec->stats_block_coalesce_usecs) ||
+ (ec->use_adaptive_rx_coalesce) ||
+ (ec->use_adaptive_tx_coalesce) ||
+ (ec->pkt_rate_low) ||
+ (ec->rx_coalesce_usecs_low) ||
+ (ec->rx_max_coalesced_frames_low) ||
+ (ec->tx_coalesce_usecs_low) ||
+ (ec->tx_max_coalesced_frames_low) ||
+ (ec->pkt_rate_high) ||
+ (ec->rx_coalesce_usecs_high) ||
+ (ec->rx_max_coalesced_frames_high) ||
+ (ec->tx_coalesce_usecs_high) ||
+ (ec->tx_max_coalesced_frames_high) ||
+ (ec->rate_sample_interval))
+ return -EOPNOTSUPP;
+
+ /* Can only change rx-frames when interface is down (see
+ * rx_descriptor_init in xgbe-dev.c)
+ */
+ rx_frames = pdata->rx_frames;
+ if (rx_frames != ec->rx_max_coalesced_frames && netif_running(netdev)) {
+ netdev_alert(netdev,
+ "interface must be down to change rx-frames\n");
+ return -EINVAL;
+ }
+
+ rx_riwt = hw_if->usec_to_riwt(pdata, ec->rx_coalesce_usecs);
+ rx_frames = ec->rx_max_coalesced_frames;
+
+ /* Use smallest possible value if conversion resulted in zero */
+ if (ec->rx_coalesce_usecs && !rx_riwt)
+ rx_riwt = 1;
+
+ /* Check the bounds of values for Rx */
+ if (rx_riwt > XGMAC_MAX_DMA_RIWT) {
+ rx_usecs = hw_if->riwt_to_usec(pdata, XGMAC_MAX_DMA_RIWT);
+ netdev_alert(netdev, "rx-usec is limited to %d usecs\n",
+ rx_usecs);
+ return -EINVAL;
+ }
+ if (rx_frames > pdata->channel->rx_ring->rdesc_count) {
+ netdev_alert(netdev, "rx-frames is limited to %d frames\n",
+ pdata->channel->rx_ring->rdesc_count);
+ return -EINVAL;
+ }
+
+ tx_usecs = ec->tx_coalesce_usecs;
+ tx_frames = ec->tx_max_coalesced_frames;
+
+ /* Check the bounds of values for Tx */
+ if (tx_frames > pdata->channel->tx_ring->rdesc_count) {
+ netdev_alert(netdev, "tx-frames is limited to %d frames\n",
+ pdata->channel->tx_ring->rdesc_count);
+ return -EINVAL;
+ }
+
+ pdata->rx_riwt = rx_riwt;
+ pdata->rx_frames = rx_frames;
+ hw_if->config_rx_coalesce(pdata);
+
+ pdata->tx_usecs = tx_usecs;
+ pdata->tx_frames = tx_frames;
+ hw_if->config_tx_coalesce(pdata);
+
+ DBGPR("<--xgbe_set_coalesce\n");
+
+ return 0;
+}
+
+static const struct ethtool_ops xgbe_ethtool_ops = {
+ .get_settings = xgbe_get_settings,
+ .set_settings = xgbe_set_settings,
+ .get_drvinfo = xgbe_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_coalesce = xgbe_get_coalesce,
+ .set_coalesce = xgbe_set_coalesce,
+ .get_pauseparam = xgbe_get_pauseparam,
+ .set_pauseparam = xgbe_set_pauseparam,
+ .get_strings = xgbe_get_strings,
+ .get_ethtool_stats = xgbe_get_ethtool_stats,
+ .get_sset_count = xgbe_get_sset_count,
+};
+
+struct ethtool_ops *xgbe_get_ethtool_ops(void)
+{
+ return (struct ethtool_ops *)&xgbe_ethtool_ops;
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
new file mode 100644
index 0000000..c83584a
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c
@@ -0,0 +1,512 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/clk.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(XGBE_DRV_VERSION);
+MODULE_DESCRIPTION(XGBE_DRV_DESC);
+
+static struct xgbe_channel *xgbe_alloc_rings(struct xgbe_prv_data *pdata)
+{
+ struct xgbe_channel *channel_mem, *channel;
+ struct xgbe_ring *tx_ring, *rx_ring;
+ unsigned int count, i;
+
+ DBGPR("-->xgbe_alloc_rings\n");
+
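+	/* A channel is created for each Tx or Rx ring, so size the array
+	 * for whichever ring count is larger
+	 */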
+ count = max_t(unsigned int, pdata->tx_ring_count, pdata->rx_ring_count);
+
+ channel_mem = devm_kcalloc(pdata->dev, count,
+ sizeof(struct xgbe_channel), GFP_KERNEL);
+ if (!channel_mem)
+ return NULL;
+
+ tx_ring = devm_kcalloc(pdata->dev, pdata->tx_ring_count,
+ sizeof(struct xgbe_ring), GFP_KERNEL);
+ if (!tx_ring)
+ return NULL;
+
+ rx_ring = devm_kcalloc(pdata->dev, pdata->rx_ring_count,
+ sizeof(struct xgbe_ring), GFP_KERNEL);
+ if (!rx_ring)
+ return NULL;
+
+ for (i = 0, channel = channel_mem; i < count; i++, channel++) {
+ snprintf(channel->name, sizeof(channel->name), "channel-%d", i);
+ channel->pdata = pdata;
+ channel->queue_index = i;
+ channel->dma_regs = pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * i);
+
+ if (i < pdata->tx_ring_count) {
+ spin_lock_init(&tx_ring->lock);
+ channel->tx_ring = tx_ring++;
+ }
+
+ if (i < pdata->rx_ring_count) {
+ spin_lock_init(&tx_ring->lock);
+ channel->rx_ring = rx_ring++;
+ }
+
+ DBGPR(" %s - queue_index=%u, dma_regs=%p, tx=%p, rx=%p\n",
+ channel->name, channel->queue_index, channel->dma_regs,
+ channel->tx_ring, channel->rx_ring);
+ }
+
+ pdata->channel_count = count;
+
+ DBGPR("<--xgbe_alloc_rings\n");
+
+ return channel_mem;
+}
+
+static void xgbe_default_config(struct xgbe_prv_data *pdata)
+{
+ DBGPR("-->xgbe_default_config\n");
+
+ pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+ pdata->tx_pbl = DMA_PBL_16;
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_DISABLE;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_64;
+ pdata->rx_pbl = DMA_PBL_16;
+ pdata->pause_autoneg = 1;
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ pdata->power_down = 0;
+ pdata->default_autoneg = AUTONEG_ENABLE;
+ pdata->default_speed = SPEED_10000;
+
+ DBGPR("<--xgbe_default_config\n");
+}
+
+static void xgbe_init_all_fptrs(struct xgbe_prv_data *pdata)
+{
+ xgbe_init_function_ptrs_dev(&pdata->hw_if);
+ xgbe_init_function_ptrs_desc(&pdata->desc_if);
+}
+
+static int xgbe_probe(struct platform_device *pdev)
+{
+ struct xgbe_prv_data *pdata;
+ struct xgbe_hw_if *hw_if;
+ struct xgbe_desc_if *desc_if;
+ struct net_device *netdev;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ const u8 *mac_addr;
+ int ret;
+
+ DBGPR("--> xgbe_probe\n");
+
+ netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
+ XGBE_MAX_DMA_CHANNELS);
+ if (!netdev) {
+ dev_err(dev, "alloc_etherdev failed\n");
+ ret = -ENOMEM;
+ goto err_alloc;
+ }
+ SET_NETDEV_DEV(netdev, dev);
+ pdata = netdev_priv(netdev);
+ pdata->netdev = netdev;
+ pdata->pdev = pdev;
+ pdata->dev = dev;
+ platform_set_drvdata(pdev, netdev);
+
+ spin_lock_init(&pdata->lock);
+ mutex_init(&pdata->xpcs_mutex);
+
+ /* Set and validate the number of descriptors for a ring; the
+ * counts must be powers of two so that GET_DESC_DATA() can mask
+ * the free-running ring indices
+ */
+ BUILD_BUG_ON_NOT_POWER_OF_2(TX_DESC_CNT);
+ pdata->tx_desc_count = TX_DESC_CNT;
+ if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
+ dev_err(dev, "tx descriptor count (%d) is not valid\n",
+ pdata->tx_desc_count);
+ ret = -EINVAL;
+ goto err_io;
+ }
+ BUILD_BUG_ON_NOT_POWER_OF_2(RX_DESC_CNT);
+ pdata->rx_desc_count = RX_DESC_CNT;
+ if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
+ dev_err(dev, "rx descriptor count (%d) is not valid\n",
+ pdata->rx_desc_count);
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Obtain the system clock setting */
+ pdata->sysclock = devm_clk_get(dev, NULL);
+ if (IS_ERR(pdata->sysclock)) {
+ dev_err(dev, "devm_clk_get failed\n");
+ ret = PTR_ERR(pdata->sysclock);
+ goto err_io;
+ }
+
+ /* Obtain the mmio areas for the device */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ pdata->xgmac_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xgmac_regs)) {
+ dev_err(dev, "xgmac ioremap failed\n");
+ ret = PTR_ERR(pdata->xgmac_regs);
+ goto err_io;
+ }
+ DBGPR(" xgmac_regs = %p\n", pdata->xgmac_regs);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ pdata->xpcs_regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pdata->xpcs_regs)) {
+ dev_err(dev, "xpcs ioremap failed\n");
+ ret = PTR_ERR(pdata->xpcs_regs);
+ goto err_io;
+ }
+ DBGPR(" xpcs_regs = %p\n", pdata->xpcs_regs);
+
+ /* Set the DMA mask - the device supports 40-bit DMA addressing */
+ if (!dev->dma_mask)
+ dev->dma_mask = &dev->coherent_dma_mask;
+ *(dev->dma_mask) = DMA_BIT_MASK(40);
+ dev->coherent_dma_mask = DMA_BIT_MASK(40);
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret < 0) {
+ dev_err(dev, "platform_get_irq failed\n");
+ goto err_io;
+ }
+ netdev->irq = ret;
+ netdev->base_addr = (unsigned long)pdata->xgmac_regs;
+
+ /* Set all the function pointers */
+ xgbe_init_all_fptrs(pdata);
+ hw_if = &pdata->hw_if;
+ desc_if = &pdata->desc_if;
+
+ /* Issue software reset to device */
+ hw_if->exit(pdata);
+
+ /* Populate the hardware features */
+ xgbe_get_all_hw_features(pdata);
+
+ /* Retrieve the MAC address */
+ mac_addr = of_get_mac_address(dev->of_node);
+ if (!mac_addr) {
+ dev_err(dev, "invalid mac address for this device\n");
+ ret = -EINVAL;
+ goto err_io;
+ }
+ memcpy(netdev->dev_addr, mac_addr, netdev->addr_len);
+
+ /* Retrieve the PHY mode - it must be "xgmii" */
+ pdata->phy_mode = of_get_phy_mode(dev->of_node);
+ if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) {
+ dev_err(dev, "invalid phy-mode specified for this device\n");
+ ret = -EINVAL;
+ goto err_io;
+ }
+
+ /* Set default configuration data */
+ xgbe_default_config(pdata);
+
+ /* Calculate the number of Tx and Rx rings to be created */
+ pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
+ pdata->hw_feat.tx_ch_cnt);
+ ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+ if (ret) {
+ dev_err(dev, "error setting real tx queue count\n");
+ goto err_io;
+ }
+
+ pdata->rx_ring_count = min_t(unsigned int,
+ netif_get_num_default_rss_queues(),
+ pdata->hw_feat.rx_ch_cnt);
+ ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
+ if (ret) {
+ dev_err(dev, "error setting real rx queue count\n");
+ goto err_io;
+ }
+
+ /* Allocate the rings for the DMA channels */
+ pdata->channel = xgbe_alloc_rings(pdata);
+ if (!pdata->channel) {
+ dev_err(dev, "ring allocation failed\n");
+ ret = -ENOMEM;
+ goto err_io;
+ }
+
+ /* Prepare to register with MDIO */
+ pdata->mii_bus_id = kasprintf(GFP_KERNEL, "%s", pdev->name);
+ if (!pdata->mii_bus_id) {
+ dev_err(dev, "failed to allocate mii bus id\n");
+ ret = -ENOMEM;
+ goto err_io;
+ }
+ ret = xgbe_mdio_register(pdata);
+ if (ret)
+ goto err_bus_id;
+
+ /* Set network and ethtool operations */
+ netdev->netdev_ops = xgbe_get_netdev_ops();
+ netdev->ethtool_ops = xgbe_get_ethtool_ops();
+
+ /* Set device features */
+ netdev->hw_features = NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6 |
+ NETIF_F_GRO |
+ NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_HW_VLAN_CTAG_TX;
+
+ netdev->vlan_features |= NETIF_F_SG |
+ NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM |
+ NETIF_F_TSO |
+ NETIF_F_TSO6;
+
+ netdev->features |= netdev->hw_features;
+ pdata->netdev_features = netdev->features;
+
+ xgbe_init_rx_coalesce(pdata);
+ xgbe_init_tx_coalesce(pdata);
+
+ netif_carrier_off(netdev);
+ ret = register_netdev(netdev);
+ if (ret) {
+ dev_err(dev, "net device registration failed\n");
+ goto err_reg_netdev;
+ }
+
+ xgbe_debugfs_init(pdata);
+
+ netdev_notice(netdev, "net device enabled\n");
+
+ DBGPR("<-- xgbe_probe\n");
+
+ return 0;
+
+err_reg_netdev:
+ xgbe_mdio_unregister(pdata);
+
+err_bus_id:
+ kfree(pdata->mii_bus_id);
+
+err_io:
+ free_netdev(netdev);
+
+err_alloc:
+ dev_notice(dev, "net device not enabled\n");
+
+ return ret;
+}
+
+static int xgbe_remove(struct platform_device *pdev)
+{
+ struct net_device *netdev = platform_get_drvdata(pdev);
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+ DBGPR("-->xgbe_remove\n");
+
+ xgbe_debugfs_exit(pdata);
+
+ unregister_netdev(netdev);
+
+ xgbe_mdio_unregister(pdata);
+
+ kfree(pdata->mii_bus_id);
+
+ free_netdev(netdev);
+
+ DBGPR("<--xgbe_remove\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int xgbe_suspend(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ int ret;
+
+ DBGPR("-->xgbe_suspend\n");
+
+ if (!netif_running(netdev)) {
+ DBGPR("<--xgbe_suspend\n");
+ return 0;
+ }
+
+ ret = xgbe_powerdown(netdev, XGMAC_DRIVER_CONTEXT);
+
+ DBGPR("<--xgbe_suspend\n");
+
+ return ret;
+}
+
+static int xgbe_resume(struct device *dev)
+{
+ struct net_device *netdev = dev_get_drvdata(dev);
+ int ret;
+
+ DBGPR("-->xgbe_resume\n");
+
+ if (!netif_running(netdev)) {
+ DBGPR("<--xgbe_resume\n");
+ return 0;
+ }
+
+ ret = xgbe_powerup(netdev, XGMAC_DRIVER_CONTEXT);
+
+ DBGPR("<--xgbe_resume\n");
+
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+static const struct of_device_id xgbe_of_match[] = {
+ { .compatible = "amd,xgbe-seattle-v1a", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, xgbe_of_match);
+static SIMPLE_DEV_PM_OPS(xgbe_pm_ops, xgbe_suspend, xgbe_resume);
+
+static struct platform_driver xgbe_driver = {
+ .driver = {
+ .name = "amd-xgbe",
+ .of_match_table = xgbe_of_match,
+ .pm = &xgbe_pm_ops,
+ },
+ .probe = xgbe_probe,
+ .remove = xgbe_remove,
+};
+
+module_platform_driver(xgbe_driver);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
new file mode 100644
index 0000000..ea7a5d6
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
@@ -0,0 +1,433 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/spinlock.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static int xgbe_mdio_read(struct mii_bus *mii, int prtad, int mmd_reg)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ int mmd_data;
+
+ DBGPR_MDIO("-->xgbe_mdio_read: prtad=%#x mmd_reg=%#x\n",
+ prtad, mmd_reg);
+
+ mmd_data = hw_if->read_mmd_regs(pdata, prtad, mmd_reg);
+
+ DBGPR_MDIO("<--xgbe_mdio_read: mmd_data=%#x\n", mmd_data);
+
+ return mmd_data;
+}
+
+static int xgbe_mdio_write(struct mii_bus *mii, int prtad, int mmd_reg,
+ u16 mmd_val)
+{
+ struct xgbe_prv_data *pdata = mii->priv;
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ int mmd_data = mmd_val;
+
+ DBGPR_MDIO("-->xgbe_mdio_write: prtad=%#x mmd_reg=%#x mmd_data=%#x\n",
+ prtad, mmd_reg, mmd_data);
+
+ hw_if->write_mmd_regs(pdata, prtad, mmd_reg, mmd_data);
+
+ DBGPR_MDIO("<--xgbe_mdio_write\n");
+
+ return 0;
+}
+
+static void xgbe_adjust_link(struct net_device *netdev)
+{
+ struct xgbe_prv_data *pdata = netdev_priv(netdev);
+ struct xgbe_hw_if *hw_if = &pdata->hw_if;
+ struct phy_device *phydev = pdata->phydev;
+ unsigned long flags;
+ int new_state = 0;
+
+ if (phydev == NULL)
+ return;
+
+ DBGPR_MDIO("-->xgbe_adjust_link: address=%d, newlink=%d, curlink=%d\n",
+ phydev->addr, phydev->link, pdata->phy_link);
+
+ spin_lock_irqsave(&pdata->lock, flags);
+
+ if (phydev->link) {
+ /* Flow control support */
+ if (pdata->pause_autoneg) {
+ if (phydev->pause || phydev->asym_pause) {
+ pdata->tx_pause = 1;
+ pdata->rx_pause = 1;
+ } else {
+ pdata->tx_pause = 0;
+ pdata->rx_pause = 0;
+ }
+ }
+
+ if (pdata->tx_pause != pdata->phy_tx_pause) {
+ hw_if->config_tx_flow_control(pdata);
+ pdata->phy_tx_pause = pdata->tx_pause;
+ }
+
+ if (pdata->rx_pause != pdata->phy_rx_pause) {
+ hw_if->config_rx_flow_control(pdata);
+ pdata->phy_rx_pause = pdata->rx_pause;
+ }
+
+ /* Speed support */
+ if (phydev->speed != pdata->phy_speed) {
+ new_state = 1;
+
+ switch (phydev->speed) {
+ case SPEED_10000:
+ hw_if->set_xgmii_speed(pdata);
+ break;
+
+ case SPEED_2500:
+ hw_if->set_gmii_2500_speed(pdata);
+ break;
+
+ case SPEED_1000:
+ hw_if->set_gmii_speed(pdata);
+ break;
+ }
+ pdata->phy_speed = phydev->speed;
+ }
+
+ if (phydev->link != pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 1;
+ }
+ } else if (pdata->phy_link) {
+ new_state = 1;
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+
+ if (new_state)
+ phy_print_status(phydev);
+
+ spin_unlock_irqrestore(&pdata->lock, flags);
+
+ DBGPR_MDIO("<--xgbe_adjust_link\n");
+}
+
+void xgbe_dump_phy_registers(struct xgbe_prv_data *pdata)
+{
+ struct device *dev = pdata->dev;
+ struct phy_device *phydev = pdata->mii->phy_map[XGBE_PRTAD];
+ int i;
+
+ dev_alert(dev, "\n************* PHY Reg dump **********************\n");
+
+ dev_alert(dev, "PCS Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_CTRL1));
+ dev_alert(dev, "PCS Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1));
+ dev_alert(dev, "Phy Id (PHYS ID 1 %#04x)= %#04x\n", MDIO_DEVID1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID1));
+ dev_alert(dev, "Phy Id (PHYS ID 2 %#04x)= %#04x\n", MDIO_DEVID2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVID2));
+ dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS1,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS1));
+ dev_alert(dev, "Devices in Package (%#04x)= %#04x\n", MDIO_DEVS2,
+ XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_DEVS2));
+
+ dev_alert(dev, "Auto-Neg Control Reg (%#04x) = %#04x\n", MDIO_CTRL1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1));
+ dev_alert(dev, "Auto-Neg Status Reg (%#04x) = %#04x\n", MDIO_STAT1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_STAT1));
+ dev_alert(dev, "Auto-Neg Ad Reg 1 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE));
+ dev_alert(dev, "Auto-Neg Ad Reg 2 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 1,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1));
+ dev_alert(dev, "Auto-Neg Ad Reg 3 (%#04x) = %#04x\n",
+ MDIO_AN_ADVERTISE + 2,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2));
+ dev_alert(dev, "Auto-Neg Completion Reg (%#04x) = %#04x\n",
+ MDIO_AN_COMP_STAT,
+ XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_COMP_STAT));
+
+ dev_alert(dev, "MMD Device Mask = %#x\n",
+ phydev->c45_ids.devices_in_package);
+ for (i = 0; i < ARRAY_SIZE(phydev->c45_ids.device_ids); i++)
+ dev_alert(dev, " MMD %d: ID = %#08x\n", i,
+ phydev->c45_ids.device_ids[i]);
+
+ dev_alert(dev, "\n*************************************************\n");
+}
+
+int xgbe_mdio_register(struct xgbe_prv_data *pdata)
+{
+ struct net_device *netdev = pdata->netdev;
+ struct device_node *phy_node;
+ struct mii_bus *mii;
+ struct phy_device *phydev;
+ int ret = 0;
+
+ DBGPR("-->xgbe_mdio_register\n");
+
+ /* Retrieve the phy-handle */
+ phy_node = of_parse_phandle(pdata->dev->of_node, "phy-handle", 0);
+ if (!phy_node) {
+ dev_err(pdata->dev, "unable to parse phy-handle\n");
+ return -EINVAL;
+ }
+
+ /* Allocate the MDIO bus structure */
+ mii = mdiobus_alloc();
+ if (mii == NULL) {
+ dev_err(pdata->dev, "mdiobus_alloc failed\n");
+ ret = -ENOMEM;
+ goto err_node_get;
+ }
+
+ /* Register on the MDIO bus (don't probe any PHYs) */
+ mii->name = XGBE_PHY_NAME;
+ mii->read = xgbe_mdio_read;
+ mii->write = xgbe_mdio_write;
+ snprintf(mii->id, sizeof(mii->id), "%s", pdata->mii_bus_id);
+ mii->priv = pdata;
+ mii->phy_mask = ~0;
+ mii->parent = pdata->dev;
+ ret = mdiobus_register(mii);
+ if (ret) {
+ dev_err(pdata->dev, "mdiobus_register failed\n");
+ goto err_mdiobus_alloc;
+ }
+ DBGPR(" mdiobus_register succeeded for %s\n", pdata->mii_bus_id);
+
+ /* Probe the PCS using Clause 45 */
+ phydev = get_phy_device(mii, XGBE_PRTAD, true);
+ if (IS_ERR(phydev) || !phydev ||
+ !phydev->c45_ids.device_ids[MDIO_MMD_PCS]) {
+ dev_err(pdata->dev, "get_phy_device failed\n");
+ ret = IS_ERR(phydev) ? PTR_ERR(phydev) : -ENOLINK;
+ goto err_mdiobus_register;
+ }
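+ /* Load the PHY driver module matching the PCS device id before
+ * the PHY device is registered, so a driver is available to bind
+ */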
+ request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
+ MDIO_ID_ARGS(phydev->c45_ids.device_ids[MDIO_MMD_PCS]));
+
+ of_node_get(phy_node);
+ phydev->dev.of_node = phy_node;
+ ret = phy_device_register(phydev);
+ if (ret) {
+ dev_err(pdata->dev, "phy_device_register failed\n");
+ of_node_put(phy_node);
+ goto err_phy_device;
+ }
+
+ /* Add a reference to the PHY driver so it can't be unloaded */
+ pdata->phy_module = phydev->dev.driver ?
+ phydev->dev.driver->owner : NULL;
+ if (!try_module_get(pdata->phy_module)) {
+ dev_err(pdata->dev, "try_module_get failed\n");
+ ret = -EIO;
+ goto err_phy_device;
+ }
+
+ pdata->mii = mii;
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+ pdata->phy_link = -1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->phy_tx_pause = pdata->tx_pause;
+ pdata->phy_rx_pause = pdata->rx_pause;
+
+ ret = phy_connect_direct(netdev, phydev, &xgbe_adjust_link,
+ pdata->phy_mode);
+ if (ret) {
+ netdev_err(netdev, "phy_connect_direct failed\n");
+ goto err_phy_device;
+ }
+
+ if (!phydev->drv || (phydev->drv->phy_id == 0)) {
+ netdev_err(netdev, "phy_id not valid\n");
+ ret = -ENODEV;
+ goto err_phy_connect;
+ }
+ DBGPR(" phy_connect_direct succeeded for PHY %s, link=%d\n",
+ dev_name(&phydev->dev), phydev->link);
+
+ phydev->autoneg = pdata->default_autoneg;
+ if (phydev->autoneg == AUTONEG_DISABLE) {
+ /* Add settings needed to force speed */
+ phydev->supported |= SUPPORTED_1000baseT_Full;
+ phydev->supported |= SUPPORTED_10000baseT_Full;
+
+ phydev->speed = pdata->default_speed;
+ phydev->duplex = DUPLEX_FULL;
+
+ phydev->advertising &= ~ADVERTISED_Autoneg;
+ }
+
+ pdata->phydev = phydev;
+
+ of_node_put(phy_node);
+
+ DBGPHY_REGS(pdata);
+
+ DBGPR("<--xgbe_mdio_register\n");
+
+ return 0;
+
+err_phy_connect:
+ phy_disconnect(phydev);
+
+err_phy_device:
+ phy_device_free(phydev);
+
+err_mdiobus_register:
+ mdiobus_unregister(mii);
+
+err_mdiobus_alloc:
+ mdiobus_free(mii);
+
+err_node_get:
+ of_node_put(phy_node);
+
+ return ret;
+}
+
+void xgbe_mdio_unregister(struct xgbe_prv_data *pdata)
+{
+ DBGPR("-->xgbe_mdio_unregister\n");
+
+ phy_disconnect(pdata->phydev);
+ pdata->phydev = NULL;
+
+ module_put(pdata->phy_module);
+ pdata->phy_module = NULL;
+
+ mdiobus_unregister(pdata->mii);
+ pdata->mii->priv = NULL;
+
+ mdiobus_free(pdata->mii);
+ pdata->mii = NULL;
+
+ DBGPR("<--xgbe_mdio_unregister\n");
+}
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
new file mode 100644
index 0000000..ab06271
--- /dev/null
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -0,0 +1,676 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ * The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ * Inc. unless otherwise expressly agreed to in writing between Synopsys
+ * and you.
+ *
+ * The Software IS NOT an item of Licensed Software or Licensed Product
+ * under any End User Software License Agreement or Agreement for Licensed
+ * Product with Synopsys or any supplement thereto. Permission is hereby
+ * granted, free of charge, to any person obtaining a copy of this software
+ * annotated with this license and the Software, to deal in the Software
+ * without restriction, including without limitation the rights to use,
+ * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ * of the Software, and to permit persons to whom the Software is furnished
+ * to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ * THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __XGBE_H__
+#define __XGBE_H__
+
+#include <linux/dma-mapping.h>
+#include <linux/netdevice.h>
+#include <linux/workqueue.h>
+#include <linux/phy.h>
+
+
+#define XGBE_DRV_NAME "amd-xgbe"
+#define XGBE_DRV_VERSION "1.0.0-a"
+#define XGBE_DRV_DESC "AMD 10 Gigabit Ethernet Driver"
+
+/* Descriptor related defines */
+#define TX_DESC_CNT 512
+#define TX_DESC_MIN_FREE (TX_DESC_CNT >> 3)
+#define TX_DESC_MAX_PROC (TX_DESC_CNT >> 1)
+#define RX_DESC_CNT 512
+
+#define TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+
+#define RX_MIN_BUF_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define RX_BUF_ALIGN 64
+
+#define XGBE_MAX_DMA_CHANNELS 16
+#define DMA_ARDOMAIN_SETTING 0x2
+#define DMA_ARCACHE_SETTING 0xb
+#define DMA_AWDOMAIN_SETTING 0x2
+#define DMA_AWCACHE_SETTING 0x7
+#define DMA_INTERRUPT_MASK 0x31c7
+
+#define XGMAC_MIN_PACKET 60
+#define XGMAC_STD_PACKET_MTU 1500
+#define XGMAC_MAX_STD_PACKET 1518
+#define XGMAC_JUMBO_PACKET_MTU 9000
+#define XGMAC_MAX_JUMBO_PACKET 9018
+
+#define MAX_MULTICAST_LIST 14
+#define TX_FLAGS_IP_PKT 0x00000001
+#define TX_FLAGS_TCP_PKT 0x00000002
+
+/* MDIO bus phy name */
+#define XGBE_PHY_NAME "amd_xgbe_phy"
+#define XGBE_PRTAD 0
+
+/* Driver PMT macros */
+#define XGMAC_DRIVER_CONTEXT 1
+#define XGMAC_IOCTL_CONTEXT 2
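+/* (The context value records whether a power transition was requested by
+ * the driver itself, e.g. on suspend/resume, or through an ioctl)
+ */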
+
+#define FIFO_SIZE_B(x) (x)
+#define FIFO_SIZE_KB(x) ((x) * 1024)
+
+#define XGBE_TC_CNT 2
+
+/* Helper macro for descriptor handling
+ * Always use GET_DESC_DATA to access the descriptor data
+ * since the index is free-running and needs to be and-ed
+ * with the descriptor count value of the ring to index to
+ * the proper descriptor data.
+ */
+#define GET_DESC_DATA(_ring, _idx) \
+ ((_ring)->rdata + \
+ ((_idx) & ((_ring)->rdesc_count - 1)))
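+
+/* Example (illustrative): with rdesc_count = 512, a free-running index of
+ * 513 wraps to the second entry of the ring:
+ *
+ * rdata = GET_DESC_DATA(ring, 513); - same as &ring->rdata[513 & 511],
+ * i.e. &ring->rdata[1]
+ */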
+
+
+/* Default coalescing parameters */
+#define XGMAC_INIT_DMA_TX_USECS 100
+#define XGMAC_INIT_DMA_TX_FRAMES 16
+
+#define XGMAC_MAX_DMA_RIWT 0xff
+#define XGMAC_INIT_DMA_RX_USECS 100
+#define XGMAC_INIT_DMA_RX_FRAMES 16
+
+/* Flow control queue count */
+#define XGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+
+struct xgbe_prv_data;
+
+struct xgbe_packet_data {
+ unsigned int attributes;
+
+ unsigned int errors;
+
+ unsigned int rdesc_count;
+ unsigned int length;
+
+ unsigned int header_len;
+ unsigned int tcp_header_len;
+ unsigned int tcp_payload_len;
+ unsigned short mss;
+
+ unsigned short vlan_ctag;
+};
+
+/* Common Rx and Tx descriptor mapping */
+struct xgbe_ring_desc {
+ unsigned int desc0;
+ unsigned int desc1;
+ unsigned int desc2;
+ unsigned int desc3;
+};
+
+/* Structure used to hold information related to the descriptor
+ * and the packet associated with the descriptor (always use
+ * the GET_DESC_DATA macro to access this data from the ring)
+ */
+struct xgbe_ring_data {
+ struct xgbe_ring_desc *rdesc; /* Virtual address of descriptor */
+ dma_addr_t rdesc_dma; /* DMA address of descriptor */
+
+ struct sk_buff *skb; /* Virtual address of SKB */
+ dma_addr_t skb_dma; /* DMA address of SKB data */
+ unsigned int skb_dma_len; /* Length of SKB DMA area */
+ unsigned int tso_header; /* TSO header indicator */
+
+ unsigned short len; /* Length of received Rx packet */
+
+ unsigned int interrupt; /* Interrupt indicator */
+
+ unsigned int mapped_as_page;
+};
+
+struct xgbe_ring {
+ /* Ring lock - used just for TX rings at the moment */
+ spinlock_t lock;
+
+ /* Per packet related information */
+ struct xgbe_packet_data packet_data;
+
+ /* Virtual/DMA addresses and count of allocated descriptor memory */
+ struct xgbe_ring_desc *rdesc;
+ dma_addr_t rdesc_dma;
+ unsigned int rdesc_count;
+
+ /* Array of descriptor data corresponding to the descriptor memory
+ * (always use the GET_DESC_DATA macro to access this data)
+ */
+ struct xgbe_ring_data *rdata;
+
+ /* Ring index values
+ * cur - Tx: index of descriptor to be used for current transfer
+ * Rx: index of descriptor to check for packet availability
+ * dirty - Tx: index of descriptor to check for transfer complete
+ * Rx: count of descriptors in which a packet has been received
+ * (used with skb_realloc_index to refresh the ring)
+ */
+ unsigned int cur;
+ unsigned int dirty;
+
+ /* Coalesce frame count used for interrupt bit setting */
+ unsigned int coalesce_count;
+
+ union {
+ struct {
+ unsigned int queue_stopped;
+ unsigned short cur_mss;
+ unsigned short cur_vlan_ctag;
+ } tx;
+
+ struct {
+ unsigned int realloc_index;
+ unsigned int realloc_threshold;
+ } rx;
+ };
+} ____cacheline_aligned;
+
+/* Structure used to describe the descriptor rings associated with
+ * a DMA channel.
+ */
+struct xgbe_channel {
+ char name[16];
+
+ /* Address of private data area for device */
+ struct xgbe_prv_data *pdata;
+
+ /* Queue index and base address of queue's DMA registers */
+ unsigned int queue_index;
+ void __iomem *dma_regs;
+
+ unsigned int saved_ier;
+
+ unsigned int tx_timer_active;
+ struct hrtimer tx_timer;
+
+ struct xgbe_ring *tx_ring;
+ struct xgbe_ring *rx_ring;
+} ____cacheline_aligned;
+
+enum xgbe_int {
+ XGMAC_INT_DMA_ISR_DC0IS,
+ XGMAC_INT_DMA_CH_SR_TI,
+ XGMAC_INT_DMA_CH_SR_TPS,
+ XGMAC_INT_DMA_CH_SR_TBU,
+ XGMAC_INT_DMA_CH_SR_RI,
+ XGMAC_INT_DMA_CH_SR_RBU,
+ XGMAC_INT_DMA_CH_SR_RPS,
+ XGMAC_INT_DMA_CH_SR_FBE,
+ XGMAC_INT_DMA_ALL,
+};
+
+enum xgbe_int_state {
+ XGMAC_INT_STATE_SAVE,
+ XGMAC_INT_STATE_RESTORE,
+};
+
+enum xgbe_mtl_fifo_size {
+ XGMAC_MTL_FIFO_SIZE_256 = 0x00,
+ XGMAC_MTL_FIFO_SIZE_512 = 0x01,
+ XGMAC_MTL_FIFO_SIZE_1K = 0x03,
+ XGMAC_MTL_FIFO_SIZE_2K = 0x07,
+ XGMAC_MTL_FIFO_SIZE_4K = 0x0f,
+ XGMAC_MTL_FIFO_SIZE_8K = 0x1f,
+ XGMAC_MTL_FIFO_SIZE_16K = 0x3f,
+ XGMAC_MTL_FIFO_SIZE_32K = 0x7f,
+ XGMAC_MTL_FIFO_SIZE_64K = 0xff,
+ XGMAC_MTL_FIFO_SIZE_128K = 0x1ff,
+ XGMAC_MTL_FIFO_SIZE_256K = 0x3ff,
+};
+
+struct xgbe_mmc_stats {
+ /* Tx Stats */
+ u64 txoctetcount_gb;
+ u64 txframecount_gb;
+ u64 txbroadcastframes_g;
+ u64 txmulticastframes_g;
+ u64 tx64octets_gb;
+ u64 tx65to127octets_gb;
+ u64 tx128to255octets_gb;
+ u64 tx256to511octets_gb;
+ u64 tx512to1023octets_gb;
+ u64 tx1024tomaxoctets_gb;
+ u64 txunicastframes_gb;
+ u64 txmulticastframes_gb;
+ u64 txbroadcastframes_gb;
+ u64 txunderflowerror;
+ u64 txoctetcount_g;
+ u64 txframecount_g;
+ u64 txpauseframes;
+ u64 txvlanframes_g;
+
+ /* Rx Stats */
+ u64 rxframecount_gb;
+ u64 rxoctetcount_gb;
+ u64 rxoctetcount_g;
+ u64 rxbroadcastframes_g;
+ u64 rxmulticastframes_g;
+ u64 rxcrcerror;
+ u64 rxrunterror;
+ u64 rxjabbererror;
+ u64 rxundersize_g;
+ u64 rxoversize_g;
+ u64 rx64octets_gb;
+ u64 rx65to127octets_gb;
+ u64 rx128to255octets_gb;
+ u64 rx256to511octets_gb;
+ u64 rx512to1023octets_gb;
+ u64 rx1024tomaxoctets_gb;
+ u64 rxunicastframes_g;
+ u64 rxlengtherror;
+ u64 rxoutofrangetype;
+ u64 rxpauseframes;
+ u64 rxfifooverflow;
+ u64 rxvlanframes_gb;
+ u64 rxwatchdogerror;
+};
+
+struct xgbe_hw_if {
+ int (*tx_complete)(struct xgbe_ring_desc *);
+
+ int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*set_addn_mac_addrs)(struct xgbe_prv_data *, unsigned int);
+ int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
+
+ int (*enable_rx_csum)(struct xgbe_prv_data *);
+ int (*disable_rx_csum)(struct xgbe_prv_data *);
+
+ int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
+ int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
+
+ int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
+ void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
+ int (*set_gmii_speed)(struct xgbe_prv_data *);
+ int (*set_gmii_2500_speed)(struct xgbe_prv_data *);
+ int (*set_xgmii_speed)(struct xgbe_prv_data *);
+
+ void (*enable_tx)(struct xgbe_prv_data *);
+ void (*disable_tx)(struct xgbe_prv_data *);
+ void (*enable_rx)(struct xgbe_prv_data *);
+ void (*disable_rx)(struct xgbe_prv_data *);
+
+ void (*powerup_tx)(struct xgbe_prv_data *);
+ void (*powerdown_tx)(struct xgbe_prv_data *);
+ void (*powerup_rx)(struct xgbe_prv_data *);
+ void (*powerdown_rx)(struct xgbe_prv_data *);
+
+ int (*init)(struct xgbe_prv_data *);
+ int (*exit)(struct xgbe_prv_data *);
+
+ int (*enable_int)(struct xgbe_channel *, enum xgbe_int);
+ int (*disable_int)(struct xgbe_channel *, enum xgbe_int);
+ void (*pre_xmit)(struct xgbe_channel *);
+ int (*dev_read)(struct xgbe_channel *);
+ void (*tx_desc_init)(struct xgbe_channel *);
+ void (*rx_desc_init)(struct xgbe_channel *);
+ void (*rx_desc_reset)(struct xgbe_ring_data *);
+ void (*tx_desc_reset)(struct xgbe_ring_data *);
+ int (*is_last_desc)(struct xgbe_ring_desc *);
+ int (*is_context_desc)(struct xgbe_ring_desc *);
+
+ /* For FLOW ctrl */
+ int (*config_tx_flow_control)(struct xgbe_prv_data *);
+ int (*config_rx_flow_control)(struct xgbe_prv_data *);
+
+ /* For RX coalescing */
+ int (*config_rx_coalesce)(struct xgbe_prv_data *);
+ int (*config_tx_coalesce)(struct xgbe_prv_data *);
+ unsigned int (*usec_to_riwt)(struct xgbe_prv_data *, unsigned int);
+ unsigned int (*riwt_to_usec)(struct xgbe_prv_data *, unsigned int);
+
+ /* For RX and TX threshold config */
+ int (*config_rx_threshold)(struct xgbe_prv_data *, unsigned int);
+ int (*config_tx_threshold)(struct xgbe_prv_data *, unsigned int);
+
+ /* For RX and TX Store and Forward Mode config */
+ int (*config_rsf_mode)(struct xgbe_prv_data *, unsigned int);
+ int (*config_tsf_mode)(struct xgbe_prv_data *, unsigned int);
+
+ /* For TX DMA Operate on Second Frame config */
+ int (*config_osp_mode)(struct xgbe_prv_data *);
+
+ /* For RX and TX PBL config */
+ int (*config_rx_pbl_val)(struct xgbe_prv_data *);
+ int (*get_rx_pbl_val)(struct xgbe_prv_data *);
+ int (*config_tx_pbl_val)(struct xgbe_prv_data *);
+ int (*get_tx_pbl_val)(struct xgbe_prv_data *);
+ int (*config_pblx8)(struct xgbe_prv_data *);
+
+ /* For MMC statistics */
+ void (*rx_mmc_int)(struct xgbe_prv_data *);
+ void (*tx_mmc_int)(struct xgbe_prv_data *);
+ void (*read_mmc_stats)(struct xgbe_prv_data *);
+};
+
+struct xgbe_desc_if {
+ int (*alloc_ring_resources)(struct xgbe_prv_data *);
+ void (*free_ring_resources)(struct xgbe_prv_data *);
+ int (*map_tx_skb)(struct xgbe_channel *, struct sk_buff *);
+ void (*realloc_skb)(struct xgbe_channel *);
+ void (*unmap_skb)(struct xgbe_prv_data *, struct xgbe_ring_data *);
+ void (*wrapper_tx_desc_init)(struct xgbe_prv_data *);
+ void (*wrapper_rx_desc_init)(struct xgbe_prv_data *);
+};
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct xgbe_hw_features {
+ /* HW Feature Register0 */
+ unsigned int gmii; /* 1000 Mbps support */
+ unsigned int vlhash; /* VLAN Hash Filter */
+ unsigned int sma; /* SMA(MDIO) Interface */
+ unsigned int rwk; /* PMT remote wake-up packet */
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+ unsigned int adv_ts_hi; /* Advanced Timestamping High Word */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+ unsigned int dma_debug; /* DMA Debug Registers */
+ unsigned int rss; /* Receive Side Scaling */
+ unsigned int hash_table_size; /* Hash Table Size */
+ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
+
+ /* HW Feature Register2 */
+ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
+ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
+ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
+ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
+ unsigned int pps_out_num; /* Number of PPS outputs */
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+};
+
+struct xgbe_prv_data {
+ struct net_device *netdev;
+ struct platform_device *pdev;
+ struct device *dev;
+
+ /* XGMAC/XPCS related mmio registers */
+ void __iomem *xgmac_regs; /* XGMAC CSRs */
+ void __iomem *xpcs_regs; /* XPCS MMD registers */
+
+ /* Overall device lock */
+ spinlock_t lock;
+
+ /* XPCS indirect addressing mutex */
+ struct mutex xpcs_mutex;
+
+ int irq_number;
+
+ struct xgbe_hw_if hw_if;
+ struct xgbe_desc_if desc_if;
+
+ /* Rings for Tx/Rx on a DMA channel */
+ struct xgbe_channel *channel;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_ring_count;
+ unsigned int rx_desc_count;
+
+ /* Tx/Rx common settings */
+ unsigned int pblx8;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
+
+ /* Tx coalescing settings */
+ unsigned int tx_usecs;
+ unsigned int tx_frames;
+
+ /* Rx coalescing settings */
+ unsigned int rx_riwt;
+ unsigned int rx_frames;
+
+ /* Current MTU */
+ unsigned int rx_buf_size;
+
+ /* Flow control settings */
+ unsigned int pause_autoneg;
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+
+ /* MDIO settings */
+ struct module *phy_module;
+ char *mii_bus_id;
+ struct mii_bus *mii;
+ int mdio_mmd;
+ struct phy_device *phydev;
+ int default_autoneg;
+ int default_speed;
+
+ /* Current PHY settings */
+ phy_interface_t phy_mode;
+ int phy_link;
+ int phy_speed;
+ unsigned int phy_tx_pause;
+ unsigned int phy_rx_pause;
+
+ /* Netdev related settings */
+ netdev_features_t netdev_features;
+ struct napi_struct napi;
+ struct xgbe_mmc_stats mmc_stats;
+
+ /* System clock value used for Rx watchdog */
+ struct clk *sysclock;
+
+ /* Hardware features of the device */
+ struct xgbe_hw_features hw_feat;
+
+ /* Device restart work structure */
+ struct work_struct restart_work;
+
+ /* Keeps track of power mode */
+ unsigned int power_down;
+
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *xgbe_debugfs;
+
+ unsigned int debugfs_xgmac_reg;
+
+ unsigned int debugfs_xpcs_mmd;
+ unsigned int debugfs_xpcs_reg;
+#endif
+};
+
+/* Function prototypes */
+
+void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
+void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
+struct net_device_ops *xgbe_get_netdev_ops(void);
+struct ethtool_ops *xgbe_get_ethtool_ops(void);
+
+int xgbe_mdio_register(struct xgbe_prv_data *);
+void xgbe_mdio_unregister(struct xgbe_prv_data *);
+void xgbe_dump_phy_registers(struct xgbe_prv_data *);
+void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
+ unsigned int);
+void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
+ unsigned int);
+void xgbe_print_pkt(struct net_device *, struct sk_buff *, bool);
+void xgbe_get_all_hw_features(struct xgbe_prv_data *);
+int xgbe_powerup(struct net_device *, unsigned int);
+int xgbe_powerdown(struct net_device *, unsigned int);
+void xgbe_init_rx_coalesce(struct xgbe_prv_data *);
+void xgbe_init_tx_coalesce(struct xgbe_prv_data *);
+
+#ifdef CONFIG_DEBUG_FS
+void xgbe_debugfs_init(struct xgbe_prv_data *);
+void xgbe_debugfs_exit(struct xgbe_prv_data *);
+#else
+static inline void xgbe_debugfs_init(struct xgbe_prv_data *pdata) {}
+static inline void xgbe_debugfs_exit(struct xgbe_prv_data *pdata) {}
+#endif /* CONFIG_DEBUG_FS */
+
+/* NOTE: Uncomment for TX and RX DESCRIPTOR DUMP in KERNEL LOG */
+#if 0
+#define XGMAC_ENABLE_TX_DESC_DUMP
+#define XGMAC_ENABLE_RX_DESC_DUMP
+#endif
+
+/* NOTE: Uncomment for TX and RX PACKET DUMP in KERNEL LOG */
+#if 0
+#define XGMAC_ENABLE_TX_PKT_DUMP
+#define XGMAC_ENABLE_RX_PKT_DUMP
+#endif
+
+/* NOTE: Uncomment for function trace log messages in KERNEL LOG */
+#if 0
+#define YDEBUG
+#define YDEBUG_MDIO
+#endif
+
+/* For debug prints */
+#ifdef YDEBUG
+#define DBGPR(x...) pr_alert(x)
+#define DBGPHY_REGS(x...) xgbe_dump_phy_registers(x)
+#else
+#define DBGPR(x...) do { } while (0)
+#define DBGPHY_REGS(x...) do { } while (0)
+#endif
+
+#ifdef YDEBUG_MDIO
+#define DBGPR_MDIO(x...) pr_alert(x)
+#else
+#define DBGPR_MDIO(x...) do { } while (0)
+#endif
+
+#endif
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index d647a7d..18e2fac 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -13,6 +13,7 @@
* Vineet Gupta
*/
+#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -362,6 +363,15 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
return IRQ_HANDLED;
}
+#ifdef CONFIG_NET_POLL_CONTROLLER
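+/* Polling 'interrupt' - used by netconsole and other tools that need to
+ * perform network I/O when normal interrupt handling may be unavailable
+ */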
+static void arc_emac_poll_controller(struct net_device *dev)
+{
+ disable_irq(dev->irq);
+ arc_emac_intr(dev->irq, dev);
+ enable_irq(dev->irq);
+}
+#endif
+
/**
* arc_emac_open - Open the network device.
* @ndev: Pointer to the network device.
@@ -451,6 +461,41 @@ static int arc_emac_open(struct net_device *ndev)
}
/**
+ * arc_emac_set_rx_mode - Change the receive filtering mode.
+ * @ndev: Pointer to the network device.
+ *
+ * This function enables/disables promiscuous or all-multicast mode
+ * and updates the multicast filtering list of the network device.
+ */
+static void arc_emac_set_rx_mode(struct net_device *ndev)
+{
+ struct arc_emac_priv *priv = netdev_priv(ndev);
+
+ if (ndev->flags & IFF_PROMISC) {
+ arc_reg_or(priv, R_CTRL, PROM_MASK);
+ } else {
+ arc_reg_clr(priv, R_CTRL, PROM_MASK);
+
+ if (ndev->flags & IFF_ALLMULTI) {
+ arc_reg_set(priv, R_LAFL, ~0);
+ arc_reg_set(priv, R_LAFH, ~0);
+ } else {
+ struct netdev_hw_addr *ha;
+ unsigned int filter[2] = { 0, 0 };
+ int bit;
+
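+ /* Hash each address into the 64-bit filter: the top six bits
+ * of the little-endian CRC select one of 64 bits, split
+ * across the two 32-bit LAFL/LAFH registers
+ */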
+ netdev_for_each_mc_addr(ha, ndev) {
+ bit = ether_crc_le(ETH_ALEN, ha->addr) >> 26;
+ filter[bit >> 5] |= 1 << (bit & 31);
+ }
+
+ arc_reg_set(priv, R_LAFL, filter[0]);
+ arc_reg_set(priv, R_LAFH, filter[1]);
+ }
+ }
+}
+
+/**
* arc_emac_stop - Close the network device.
* @ndev: Pointer to the network device.
*
@@ -620,6 +665,10 @@ static const struct net_device_ops arc_emac_netdev_ops = {
.ndo_start_xmit = arc_emac_tx,
.ndo_set_mac_address = arc_emac_set_address,
.ndo_get_stats = arc_emac_stats,
+ .ndo_set_rx_mode = arc_emac_set_rx_mode,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+ .ndo_poll_controller = arc_emac_poll_controller,
+#endif
};
static int arc_emac_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 17bb9ce..49faa97 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1302,7 +1302,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netdev->netdev_ops = &alx_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &alx_ethtool_ops);
+ netdev->ethtool_ops = &alx_ethtool_ops;
netdev->irq = pdev->irq;
netdev->watchdog_timeo = ALX_WATCHDOG_TIME;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
index 859ea84..48694c2 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_ethtool.c
@@ -56,8 +56,8 @@ static int atl1c_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_ENABLE;
@@ -305,5 +305,5 @@ static const struct ethtool_ops atl1c_ethtool_ops = {
void atl1c_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &atl1c_ethtool_ops);
+ netdev->ethtool_ops = &atl1c_ethtool_ops;
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
index 82b2386..1be072f 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_ethtool.c
@@ -57,8 +57,8 @@ static int atl1e_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_ENABLE;
@@ -388,5 +388,5 @@ static const struct ethtool_ops atl1e_ethtool_ops = {
void atl1e_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &atl1e_ethtool_ops);
+ netdev->ethtool_ops = &atl1e_ethtool_ops;
}
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index dfd0e91..b460db7 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -3258,8 +3258,8 @@ static int atl1_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
hw->media_type == MEDIA_TYPE_1000M_FULL)
diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c
index 78befb5..6746bd7 100644
--- a/drivers/net/ethernet/atheros/atlx/atl2.c
+++ b/drivers/net/ethernet/atheros/atlx/atl2.c
@@ -1396,7 +1396,7 @@ static int atl2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
atl2_setup_pcicmd(pdev);
netdev->netdev_ops = &atl2_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &atl2_ethtool_ops);
+ netdev->ethtool_ops = &atl2_ethtool_ops;
netdev->watchdog_timeo = 5 * HZ;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
@@ -1769,8 +1769,8 @@ static int atl2_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 85dbddd..3e48809 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -150,4 +150,15 @@ config BGMAC
In case of using this driver on BCM4706 it's also requires to enable
BCMA_DRIVER_GMAC_CMN to make it work.
+config SYSTEMPORT
+ tristate "Broadcom SYSTEMPORT internal MAC support"
+ depends on OF
+ select MII
+ select PHYLIB
+ select FIXED_PHY if SYSTEMPORT=y
+ help
+ This driver supports the built-in Ethernet MACs found in the
+ Broadcom BCM7xxx Set Top Box family chipsets using an internal
+ Ethernet switch.
+
endif # NET_VENDOR_BROADCOM
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index fd639a0..e2a958a 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -11,3 +11,4 @@ obj-$(CONFIG_BNX2X) += bnx2x/
obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o
obj-$(CONFIG_TIGON3) += tg3.o
obj-$(CONFIG_BGMAC) += bgmac.o
+obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 05ba625..ca5a20a 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -2380,7 +2380,7 @@ static int b44_init_one(struct ssb_device *sdev,
netif_napi_add(dev, &bp->napi, b44_poll, 64);
dev->watchdog_timeo = B44_TX_TIMEOUT;
dev->irq = sdev->irq;
- SET_ETHTOOL_OPS(dev, &b44_ethtool_ops);
+ dev->ethtool_ops = &b44_ethtool_ops;
err = ssb_bus_powerup(sdev->bus, 0);
if (err) {
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index a7d11f5..3e8d1a8 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -1315,8 +1315,7 @@ static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = {
};
-#define BCM_ENET_STATS_LEN \
- (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats))
+#define BCM_ENET_STATS_LEN ARRAY_SIZE(bcm_enet_gstrings_stats)
static const u32 unused_mib_regs[] = {
ETH_MIB_TX_ALL_OCTETS,
@@ -1898,7 +1897,7 @@ static int bcm_enet_probe(struct platform_device *pdev)
dev->netdev_ops = &bcm_enet_ops;
netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
- SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
+ dev->ethtool_ops = &bcm_enet_ethtool_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
ret = register_netdev(dev);
@@ -2784,7 +2783,7 @@ static int bcm_enetsw_probe(struct platform_device *pdev)
/* register netdevice */
dev->netdev_ops = &bcm_enetsw_ops;
netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);
- SET_ETHTOOL_OPS(dev, &bcm_enetsw_ethtool_ops);
+ dev->ethtool_ops = &bcm_enetsw_ethtool_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
spin_lock_init(&priv->enetsw_mdio_lock);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
new file mode 100644
index 0000000..141160e
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -0,0 +1,1654 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+#include "bcmsysport.h"
+
+/* I/O accessors register helpers */
+#define BCM_SYSPORT_IO_MACRO(name, offset) \
+static inline u32 name##_readl(struct bcm_sysport_priv *priv, u32 off) \
+{ \
+ u32 reg = __raw_readl(priv->base + offset + off); \
+ return reg; \
+} \
+static inline void name##_writel(struct bcm_sysport_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ __raw_writel(val, priv->base + offset + off); \
+} \
+
+BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
+BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
+BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
+BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
+BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
+BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
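+
+/* For instance, the umac instantiation above generates umac_readl() and
+ * umac_writel(); a call like (illustrative, UMAC_CMD being a register
+ * offset from bcmsysport.h)
+ * umac_writel(priv, reg, UMAC_CMD);
+ * resolves to __raw_writel(reg, priv->base + SYS_PORT_UMAC_OFFSET + UMAC_CMD)
+ */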
+
+/* L2-interrupt masking/unmasking helpers that automatically save the applied
+ * mask in a software copy to avoid CPU_MASK_STATUS reads in hot paths.
+ */
+#define BCM_SYSPORT_INTR_L2(which) \
+static inline void intrl2_##which##_mask_clear(struct bcm_sysport_priv *priv, \
+ u32 mask) \
+{ \
+ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_CLEAR); \
+ priv->irq##which##_mask &= ~(mask); \
+} \
+static inline void intrl2_##which##_mask_set(struct bcm_sysport_priv *priv, \
+ u32 mask) \
+{ \
+ intrl2_##which##_writel(priv, mask, INTRL2_CPU_MASK_SET); \
+ priv->irq##which##_mask |= (mask); \
+} \
+
+BCM_SYSPORT_INTR_L2(0)
+BCM_SYSPORT_INTR_L2(1)
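+
+/* Example (illustrative, source_bit standing for any interrupt source bit
+ * defined in bcmsysport.h): unmasking a source updates the hardware mask
+ * and the cached software copy in one call:
+ * intrl2_0_mask_clear(priv, source_bit);
+ */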
+
+/* Register accesses to GISB/RBUS registers are expensive (a few hundred
+ * nanoseconds), so keep the check for 64-bit addressing explicit here to
+ * save one register write per packet on 32-bit platforms.
+ */
+static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
+ void __iomem *d,
+ dma_addr_t addr)
+{
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
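+ /* The upper address bits share a descriptor word with the
+ * status/length fields, hence the DESC_ADDR_HI_MASK
+ */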
+ __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
+ d + DESC_ADDR_HI_STATUS_LEN);
+#endif
+ __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
+}
+
+static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
+ struct dma_desc *desc,
+ unsigned int port)
+{
+ /* Ports are latched, so write upper address first */
+ tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
+ tdma_writel(priv, desc->addr_lo, TDMA_WRITE_PORT_LO(port));
+}
+
+/* Ethtool operations */
+static int bcm_sysport_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int bcm_sysport_set_rx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+ reg = rxchk_readl(priv, RXCHK_CONTROL);
+ if (priv->rx_csum_en)
+ reg |= RXCHK_EN;
+ else
+ reg &= ~RXCHK_EN;
+
+ /* If UniMAC forwards CRC, we need to skip over it to get
+ * a valid CHK bit to be set in the per-packet status word
+ */
+ if (priv->rx_csum_en && priv->crc_fwd)
+ reg |= RXCHK_SKIP_FCS;
+ else
+ reg &= ~RXCHK_SKIP_FCS;
+
+ rxchk_writel(priv, reg, RXCHK_CONTROL);
+
+ return 0;
+}
+
+static int bcm_sysport_set_tx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ /* Hardware transmit checksum requires us to enable the Transmit status
+ * block prepended to the packet contents
+ */
+ priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+ reg = tdma_readl(priv, TDMA_CONTROL);
+ if (priv->tsb_en)
+ reg |= TSB_EN;
+ else
+ reg &= ~TSB_EN;
+ tdma_writel(priv, reg, TDMA_CONTROL);
+
+ return 0;
+}
+
+static int bcm_sysport_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ dev->features;
+ netdev_features_t wanted = dev->wanted_features;
+ int ret = 0;
+
+ if (changed & NETIF_F_RXCSUM)
+ ret = bcm_sysport_set_rx_csum(dev, wanted);
+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+ ret = bcm_sysport_set_tx_csum(dev, wanted);
+
+ return ret;
+}
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
+ /* general stats */
+ STAT_NETDEV(rx_packets),
+ STAT_NETDEV(tx_packets),
+ STAT_NETDEV(rx_bytes),
+ STAT_NETDEV(tx_bytes),
+ STAT_NETDEV(rx_errors),
+ STAT_NETDEV(tx_errors),
+ STAT_NETDEV(rx_dropped),
+ STAT_NETDEV(tx_dropped),
+ STAT_NETDEV(multicast),
+ /* UniMAC RSV counters */
+ STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+ STAT_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+ STAT_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+ STAT_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+ STAT_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+ STAT_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+ STAT_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+ STAT_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+ STAT_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+ STAT_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+ STAT_MIB_RX("rx_pkts", mib.rx.pkt),
+ STAT_MIB_RX("rx_bytes", mib.rx.bytes),
+ STAT_MIB_RX("rx_multicast", mib.rx.mca),
+ STAT_MIB_RX("rx_broadcast", mib.rx.bca),
+ STAT_MIB_RX("rx_fcs", mib.rx.fcs),
+ STAT_MIB_RX("rx_control", mib.rx.cf),
+ STAT_MIB_RX("rx_pause", mib.rx.pf),
+ STAT_MIB_RX("rx_unknown", mib.rx.uo),
+ STAT_MIB_RX("rx_align", mib.rx.aln),
+ STAT_MIB_RX("rx_outrange", mib.rx.flr),
+ STAT_MIB_RX("rx_code", mib.rx.cde),
+ STAT_MIB_RX("rx_carrier", mib.rx.fcr),
+ STAT_MIB_RX("rx_oversize", mib.rx.ovr),
+ STAT_MIB_RX("rx_jabber", mib.rx.jbr),
+ STAT_MIB_RX("rx_mtu_err", mib.rx.mtue),
+ STAT_MIB_RX("rx_good_pkts", mib.rx.pok),
+ STAT_MIB_RX("rx_unicast", mib.rx.uc),
+ STAT_MIB_RX("rx_ppp", mib.rx.ppp),
+ STAT_MIB_RX("rx_crc", mib.rx.rcrc),
+ /* UniMAC TSV counters */
+ STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+ STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+ STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+ STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+ STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+ STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+ STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+ STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+ STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+ STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+ STAT_MIB_TX("tx_pkts", mib.tx.pkts),
+ STAT_MIB_TX("tx_multicast", mib.tx.mca),
+ STAT_MIB_TX("tx_broadcast", mib.tx.bca),
+ STAT_MIB_TX("tx_pause", mib.tx.pf),
+ STAT_MIB_TX("tx_control", mib.tx.cf),
+ STAT_MIB_TX("tx_fcs_err", mib.tx.fcs),
+ STAT_MIB_TX("tx_oversize", mib.tx.ovr),
+ STAT_MIB_TX("tx_defer", mib.tx.drf),
+ STAT_MIB_TX("tx_excess_defer", mib.tx.edf),
+ STAT_MIB_TX("tx_single_col", mib.tx.scl),
+ STAT_MIB_TX("tx_multi_col", mib.tx.mcl),
+ STAT_MIB_TX("tx_late_col", mib.tx.lcl),
+ STAT_MIB_TX("tx_excess_col", mib.tx.ecl),
+ STAT_MIB_TX("tx_frags", mib.tx.frg),
+ STAT_MIB_TX("tx_total_col", mib.tx.ncl),
+ STAT_MIB_TX("tx_jabber", mib.tx.jbr),
+ STAT_MIB_TX("tx_bytes", mib.tx.bytes),
+ STAT_MIB_TX("tx_good_pkts", mib.tx.pok),
+ STAT_MIB_TX("tx_unicast", mib.tx.uc),
+ /* UniMAC RUNT counters */
+ STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+ STAT_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+ STAT_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+ STAT_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+ /* RXCHK misc statistics */
+ STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
+ STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
+ RXCHK_OTHER_DISC_CNTR),
+ /* RBUF misc statistics */
+ STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
+ STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
+};
+
+#define BCM_SYSPORT_STATS_LEN ARRAY_SIZE(bcm_sysport_gstrings_stats)
+
+static void bcm_sysport_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, "0.1", sizeof(info->version));
+ strlcpy(info->bus_info, "platform", sizeof(info->bus_info));
+ info->n_stats = BCM_SYSPORT_STATS_LEN;
+}
+
+static u32 bcm_sysport_get_msglvl(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = enable;
+}
+
+static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCM_SYSPORT_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bcm_sysport_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ bcm_sysport_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
+{
+ int i, j = 0;
+
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ const struct bcm_sysport_stats *s;
+ u8 offset = 0;
+ u32 val = 0;
+ char *p;
+
+ s = &bcm_sysport_gstrings_stats[i];
+ switch (s->type) {
+ case BCM_SYSPORT_STAT_NETDEV:
+ continue;
+ case BCM_SYSPORT_STAT_MIB_RX:
+ case BCM_SYSPORT_STAT_MIB_TX:
+ case BCM_SYSPORT_STAT_RUNT:
+ if (s->type != BCM_SYSPORT_STAT_MIB_RX)
+ offset = UMAC_MIB_STAT_OFFSET;
+ val = umac_readl(priv, UMAC_MIB_START + j + offset);
+ break;
+ case BCM_SYSPORT_STAT_RXCHK:
+ val = rxchk_readl(priv, s->reg_offset);
+ if (val == ~0)
+ rxchk_writel(priv, 0, s->reg_offset);
+ break;
+ case BCM_SYSPORT_STAT_RBUF:
+ val = rbuf_readl(priv, s->reg_offset);
+ if (val == ~0)
+ rbuf_writel(priv, 0, s->reg_offset);
+ break;
+ }
+
+ j += s->stat_sizeof;
+ p = (char *)priv + s->stat_offset;
+ *(u32 *)p = val;
+ }
+
+ netif_dbg(priv, hw, priv->netdev, "updated MIB counters\n");
+}
+
+static void bcm_sysport_get_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (netif_running(dev))
+ bcm_sysport_update_mib_counters(priv);
+
+ for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ const struct bcm_sysport_stats *s;
+ char *p;
+
+ s = &bcm_sysport_gstrings_stats[i];
+ if (s->type == BCM_SYSPORT_STAT_NETDEV)
+ p = (char *)&dev->stats;
+ else
+ p = (char *)priv;
+ p += s->stat_offset;
+ data[i] = *(u32 *)p;
+ }
+}
+
+static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
+{
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+ dma_addr_t mapping;
+ int ret;
+
+ cb->skb = netdev_alloc_skb(priv->netdev, RX_BUF_LENGTH);
+ if (!cb->skb) {
+ netif_err(priv, rx_err, ndev, "SKB alloc failed\n");
+ return -ENOMEM;
+ }
+
+ mapping = dma_map_single(kdev, cb->skb->data,
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ bcm_sysport_free_cb(cb);
+ netif_err(priv, rx_err, ndev, "DMA mapping failure\n");
+ return ret;
+ }
+
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_desc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+
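+ /* The wrap below assumes a power-of-two ring size (NUM_RX_DESC = 512
+ * here); e.g. index 511 + 1 wraps back to 0.
+ */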
+ priv->rx_bd_assign_index++;
+ priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+ priv->rx_bd_assign_ptr = priv->rx_bds +
+ (priv->rx_bd_assign_index * DESC_SIZE);
+
+ netif_dbg(priv, rx_status, ndev, "RX refill\n");
+
+ return 0;
+}
+
+static int bcm_sysport_alloc_rx_bufs(struct bcm_sysport_priv *priv)
+{
+ struct bcm_sysport_cb *cb;
+ int ret = 0;
+ unsigned int i;
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[priv->rx_bd_assign_index];
+ if (cb->skb)
+ continue;
+
+ ret = bcm_sysport_rx_refill(priv, cb);
+ if (ret)
+ break;
+ }
+
+ return ret;
+}
+
+/* Poll the hardware for up to budget packets to process */
+static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
+ unsigned int budget)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+ unsigned int processed = 0, to_process;
+ struct bcm_sysport_cb *cb;
+ struct sk_buff *skb;
+ unsigned int p_index;
+ u16 len, status;
+ struct bcm_rsb *rsb;
+
+ /* Determine how much we should process since last call */
+ p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+ p_index &= RDMA_PROD_INDEX_MASK;
+
+ if (p_index < priv->rx_c_index)
+ to_process = (RDMA_CONS_INDEX_MASK + 1) -
+ priv->rx_c_index + p_index;
+ else
+ to_process = p_index - priv->rx_c_index;
+
+ netif_dbg(priv, rx_status, ndev,
+ "p_index=%d rx_c_index=%d to_process=%d\n",
+ p_index, priv->rx_c_index, to_process);
+
+ while ((processed < to_process) &&
+ (processed < budget)) {
+
+ cb = &priv->rx_cbs[priv->rx_read_ptr];
+ skb = cb->skb;
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
+
+ /* Extract the Receive Status Block prepended */
+ rsb = (struct bcm_rsb *)skb->data;
+ len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
+ status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
+ DESC_STATUS_MASK;
+
+ processed++;
+ priv->rx_read_ptr++;
+ if (priv->rx_read_ptr == priv->num_rx_bds)
+ priv->rx_read_ptr = 0;
+
+ netif_dbg(priv, rx_status, ndev,
+ "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
+ p_index, priv->rx_c_index, priv->rx_read_ptr,
+ len, status);
+
+ if (unlikely(!skb)) {
+ netif_err(priv, rx_err, ndev, "out of memory!\n");
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ goto refill;
+ }
+
+ if (unlikely(!(status & DESC_EOP) || !(status & DESC_SOP))) {
+ netif_err(priv, rx_status, ndev, "fragmented packet!\n");
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ bcm_sysport_free_cb(cb);
+ goto refill;
+ }
+
+ if (unlikely(status & (RX_STATUS_ERR | RX_STATUS_OVFLOW))) {
+ netif_err(priv, rx_err, ndev, "error packet\n");
+ if (status & RX_STATUS_OVFLOW)
+ ndev->stats.rx_over_errors++;
+ ndev->stats.rx_dropped++;
+ ndev->stats.rx_errors++;
+ bcm_sysport_free_cb(cb);
+ goto refill;
+ }
+
+ skb_put(skb, len);
+
+ /* Hardware validated our checksum */
+ if (likely(status & DESC_L4_CSUM))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* Hardware prepends packets with 2 bytes before the Ethernet
+ * header, plus the Receive Status Block; strip all of this
+ * from the SKB.
+ */
+ skb_pull(skb, sizeof(*rsb) + 2);
+ len -= (sizeof(*rsb) + 2);
+
+ /* UniMAC may forward CRC */
+ if (priv->crc_fwd) {
+ skb_trim(skb, len - ETH_FCS_LEN);
+ len -= ETH_FCS_LEN;
+ }
+
+ skb->protocol = eth_type_trans(skb, ndev);
+ ndev->stats.rx_packets++;
+ ndev->stats.rx_bytes += len;
+
+ napi_gro_receive(&priv->napi, skb);
+refill:
+ bcm_sysport_rx_refill(priv, cb);
+ }
+
+ return processed;
+}
+
+static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_cb *cb,
+ unsigned int *bytes_compl,
+ unsigned int *pkts_compl)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct net_device *ndev = priv->netdev;
+
+ if (cb->skb) {
+ ndev->stats.tx_bytes += cb->skb->len;
+ *bytes_compl += cb->skb->len;
+ dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len),
+ DMA_TO_DEVICE);
+ ndev->stats.tx_packets++;
+ (*pkts_compl)++;
+ bcm_sysport_free_cb(cb);
+ /* SKB fragment */
+ } else if (dma_unmap_addr(cb, dma_addr)) {
+ ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
+ dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
+ dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+}
+
+/* Reclaim queued SKBs for transmission completion, lockless version */
+static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ struct net_device *ndev = priv->netdev;
+ unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
+ unsigned int pkts_compl = 0, bytes_compl = 0;
+ struct bcm_sysport_cb *cb;
+ struct netdev_queue *txq;
+ u32 hw_ind;
+
+ txq = netdev_get_tx_queue(ndev, ring->index);
+
+ /* Compute how many descriptors have been processed since last call */
+ hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
+ c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
+ ring->p_index = (hw_ind & RING_PROD_INDEX_MASK);
+
+ last_c_index = ring->c_index;
+ num_tx_cbs = ring->size;
+
+ c_index &= (num_tx_cbs - 1);
+
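+ /* Example (illustrative): with a 256-entry ring, last_c_index = 250
+ * and c_index = 4 means 10 descriptors completed across the wrap.
+ */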
+ if (c_index >= last_c_index)
+ last_tx_cn = c_index - last_c_index;
+ else
+ last_tx_cn = num_tx_cbs - last_c_index + c_index;
+
+ netif_dbg(priv, tx_done, ndev,
+ "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
+ ring->index, c_index, last_tx_cn, last_c_index);
+
+ while (last_tx_cn-- > 0) {
+ cb = ring->cbs + last_c_index;
+ bcm_sysport_tx_reclaim_one(priv, cb, &bytes_compl, &pkts_compl);
+
+ ring->desc_count++;
+ last_c_index++;
+ last_c_index &= (num_tx_cbs - 1);
+ }
+
+ ring->c_index = c_index;
+
+ if (netif_tx_queue_stopped(txq) && pkts_compl)
+ netif_tx_wake_queue(txq);
+
+ netif_dbg(priv, tx_done, ndev,
+ "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
+ ring->index, ring->c_index, pkts_compl, bytes_compl);
+
+ return pkts_compl;
+}
+
+/* Locked version of the per-ring TX reclaim routine */
+static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ unsigned int released;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ released = __bcm_sysport_tx_reclaim(priv, ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return released;
+}
+
+static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
+{
+ struct bcm_sysport_tx_ring *ring =
+ container_of(napi, struct bcm_sysport_tx_ring, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ /* re-enable TX interrupt */
+ intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+ }
+
+ return work_done;
+}
+
+static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
+{
+ unsigned int q;
+
+ for (q = 0; q < priv->netdev->num_tx_queues; q++)
+ bcm_sysport_tx_reclaim(priv, &priv->tx_rings[q]);
+}
+
+static int bcm_sysport_poll(struct napi_struct *napi, int budget)
+{
+ struct bcm_sysport_priv *priv =
+ container_of(napi, struct bcm_sysport_priv, napi);
+ unsigned int work_done = 0;
+
+ work_done = bcm_sysport_desc_rx(priv, budget);
+
+ priv->rx_c_index += work_done;
+ priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
+ rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ /* re-enable RX interrupts */
+ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE);
+ }
+
+ return work_done;
+}
+
+/* RX and misc interrupt routine */
+static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+ priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
+ ~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+ if (unlikely(priv->irq0_stat == 0)) {
+ netdev_warn(priv->netdev, "spurious RX interrupt\n");
+ return IRQ_NONE;
+ }
+
+ if (priv->irq0_stat & INTRL2_0_RDMA_MBDONE) {
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ /* disable RX interrupts */
+ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE);
+ __napi_schedule(&priv->napi);
+ }
+ }
+
+ /* TX ring is full, perform a full reclaim since we do not know
+ * which one would trigger this interrupt
+ */
+ if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
+ bcm_sysport_tx_reclaim_all(priv);
+
+ return IRQ_HANDLED;
+}
+
+/* TX interrupt service routine */
+static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
+{
+ struct net_device *dev = dev_id;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct bcm_sysport_tx_ring *txr;
+ unsigned int ring;
+
+ priv->irq1_stat = intrl2_1_readl(priv, INTRL2_CPU_STATUS) &
+ ~intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+ if (unlikely(priv->irq1_stat == 0)) {
+ netdev_warn(priv->netdev, "spurious TX interrupt\n");
+ return IRQ_NONE;
+ }
+
+ for (ring = 0; ring < dev->num_tx_queues; ring++) {
+ if (!(priv->irq1_stat & BIT(ring)))
+ continue;
+
+ txr = &priv->tx_rings[ring];
+
+ if (likely(napi_schedule_prep(&txr->napi))) {
+ intrl2_1_mask_set(priv, BIT(ring));
+ __napi_schedule(&txr->napi);
+ }
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct sk_buff *bcm_sysport_insert_tsb(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct sk_buff *nskb;
+ struct bcm_tsb *tsb;
+ u32 csum_info;
+ u8 ip_proto;
+ u16 csum_start;
+ u16 ip_ver;
+
+ /* Re-allocate SKB if needed */
+ if (unlikely(skb_headroom(skb) < sizeof(*tsb))) {
+ nskb = skb_realloc_headroom(skb, sizeof(*tsb));
+ dev_kfree_skb(skb);
+ if (!nskb) {
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ return NULL;
+ }
+ skb = nskb;
+ }
+
+ tsb = (struct bcm_tsb *)skb_push(skb, sizeof(*tsb));
+ /* Zero-out TSB by default */
+ memset(tsb, 0, sizeof(*tsb));
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ip_ver = ntohs(skb->protocol);
+ switch (ip_ver) {
+ case ETH_P_IP:
+ ip_proto = ip_hdr(skb)->protocol;
+ break;
+ case ETH_P_IPV6:
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return skb;
+ }
+
+ /* Get the checksum offset and the L4 (transport) offset */
+ csum_start = skb_checksum_start_offset(skb) - sizeof(*tsb);
+ csum_info = (csum_start + skb->csum_offset) & L4_CSUM_PTR_MASK;
+ csum_info |= (csum_start << L4_PTR_SHIFT);
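+ /* Example (illustrative): for IPv4/TCP, csum_start = 34 (14-byte
+ * Ethernet + 20-byte IP header) and csum_offset = 16 pack a
+ * checksum-field pointer of 50 into the low 9 bits, with 34
+ * above L4_PTR_SHIFT.
+ */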
+
+ if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+ csum_info |= L4_LENGTH_VALID;
+ if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+ csum_info |= L4_UDP;
+ } else
+ csum_info = 0;
+
+ tsb->l4_ptr_dest_map = csum_info;
+ }
+
+ return skb;
+}
+
+static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ struct bcm_sysport_tx_ring *ring;
+ struct bcm_sysport_cb *cb;
+ struct netdev_queue *txq;
+ struct dma_desc *desc;
+ unsigned int skb_len;
+ unsigned long flags;
+ dma_addr_t mapping;
+ u32 len_status;
+ u16 queue;
+ int ret;
+
+ queue = skb_get_queue_mapping(skb);
+ txq = netdev_get_tx_queue(dev, queue);
+ ring = &priv->tx_rings[queue];
+
+ /* lock against tx reclaim in BH context and TX ring full interrupt */
+ spin_lock_irqsave(&ring->lock, flags);
+ if (unlikely(ring->desc_count == 0)) {
+ netif_tx_stop_queue(txq);
+ netdev_err(dev, "queue %d awake and ring full!\n", queue);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ /* Insert TSB and checksum info; the SKB may have been reallocated
+ * to gain headroom, so use the returned pointer from here on
+ */
+ if (priv->tsb_en) {
+ skb = bcm_sysport_insert_tsb(skb, dev);
+ if (!skb) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ /* The Ethernet switch we are interfaced with needs packets to be at
+ * least 64 bytes (including FCS), otherwise they will be discarded
+ * when they enter the switch port logic. When Broadcom tags are
+ * enabled, we need to make sure that packets are at least 68 bytes
+ * (including FCS and tag) because the length verification is done
+ * after the Broadcom tag is stripped off the ingress packet.
+ */
+ if (skb_padto(skb, ETH_ZLEN + ENET_BRCM_TAG_LEN)) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ skb_len = skb->len < ETH_ZLEN + ENET_BRCM_TAG_LEN ?
+ ETH_ZLEN + ENET_BRCM_TAG_LEN : skb->len;
+
+ mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(kdev, mapping)) {
+ netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
+ skb->data, skb_len);
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ /* Remember the SKB for future freeing */
+ cb = &ring->cbs[ring->curr_desc];
+ cb->skb = skb;
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ dma_unmap_len_set(cb, dma_len, skb_len);
+
+ /* Fetch a descriptor entry from our pool */
+ desc = ring->desc_cpu;
+
+ desc->addr_lo = lower_32_bits(mapping);
+ len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
+ len_status |= (skb_len << DESC_LEN_SHIFT);
+ len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
+ DESC_STATUS_SHIFT;
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
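+ /* Example (illustrative): a 64-byte frame at DMA address
+ * 0x1_0000_0000 packs 0x01 into bits 7:0 (address bits 39:32),
+ * the status flags into bits 17:8 and the length (64) into the
+ * field at DESC_LEN_SHIFT.
+ */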
+
+ ring->curr_desc++;
+ if (ring->curr_desc == ring->size)
+ ring->curr_desc = 0;
+ ring->desc_count--;
+
+ /* Ensure write completion of the descriptor status/length
+ * in DRAM before the System Port WRITE_PORT register latches
+ * the value
+ */
+ wmb();
+ desc->addr_status_len = len_status;
+ wmb();
+
+ /* Write this descriptor address to the RING write port */
+ tdma_port_write_desc_addr(priv, desc, ring->index);
+
+ /* Check ring space and update SW control flow */
+ if (ring->desc_count == 0)
+ netif_tx_stop_queue(txq);
+
+ netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
+ ring->index, ring->desc_count, ring->curr_desc);
+
+ ret = NETDEV_TX_OK;
+out:
+ spin_unlock_irqrestore(&ring->lock, flags);
+ return ret;
+}
+
+static void bcm_sysport_tx_timeout(struct net_device *dev)
+{
+ netdev_warn(dev, "transmit timeout!\n");
+
+ dev->trans_start = jiffies;
+ dev->stats.tx_errors++;
+
+ netif_tx_wake_all_queues(dev);
+}
+
+/* phylib adjust link callback */
+static void bcm_sysport_adj_link(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ unsigned int changed = 0;
+ u32 cmd_bits = 0, reg;
+
+ if (priv->old_link != phydev->link) {
+ changed = 1;
+ priv->old_link = phydev->link;
+ }
+
+ if (priv->old_duplex != phydev->duplex) {
+ changed = 1;
+ priv->old_duplex = phydev->duplex;
+ }
+
+ switch (phydev->speed) {
+ case SPEED_2500:
+ cmd_bits = CMD_SPEED_2500;
+ break;
+ case SPEED_1000:
+ cmd_bits = CMD_SPEED_1000;
+ break;
+ case SPEED_100:
+ cmd_bits = CMD_SPEED_100;
+ break;
+ case SPEED_10:
+ cmd_bits = CMD_SPEED_10;
+ break;
+ default:
+ break;
+ }
+ cmd_bits <<= CMD_SPEED_SHIFT;
+
+ if (phydev->duplex == DUPLEX_HALF)
+ cmd_bits |= CMD_HD_EN;
+
+ if (priv->old_pause != phydev->pause) {
+ changed = 1;
+ priv->old_pause = phydev->pause;
+ }
+
+ if (!phydev->pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+ if (changed) {
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN | CMD_RX_PAUSE_IGNORE |
+ CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ phy_print_status(priv->phydev);
+ }
+}
+
+static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
+ unsigned int index)
+{
+ struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+ struct device *kdev = &priv->pdev->dev;
+ size_t size;
+ void *p;
+ u32 reg;
+
+ /* Simple descriptors partitioning for now */
+ size = 256;
+
+ /* We just need one DMA descriptor which is DMA-able, since writing to
+ * the port will allocate a new descriptor in its internal linked-list
+ */
+ p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
+ GFP_KERNEL);
+ if (!p) {
+ netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
+ return -ENOMEM;
+ }
+
+ ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
+ if (!ring->cbs) {
+ dma_free_coherent(kdev, sizeof(struct dma_desc), p, ring->desc_dma);
+ netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* Initialize SW view of the ring */
+ spin_lock_init(&ring->lock);
+ ring->priv = priv;
+ netif_napi_add(priv->netdev, &ring->napi, bcm_sysport_tx_poll, 64);
+ ring->index = index;
+ ring->size = size;
+ ring->alloc_size = ring->size;
+ ring->desc_cpu = p;
+ ring->desc_count = ring->size;
+ ring->curr_desc = 0;
+
+ /* Initialize HW ring */
+ tdma_writel(priv, RING_EN, TDMA_DESC_RING_HEAD_TAIL_PTR(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_COUNT(index));
+ tdma_writel(priv, 1, TDMA_DESC_RING_INTR_CONTROL(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_PROD_CONS_INDEX(index));
+ tdma_writel(priv, RING_IGNORE_STATUS, TDMA_DESC_RING_MAPPING(index));
+ tdma_writel(priv, 0, TDMA_DESC_RING_PCP_DEI_VID(index));
+
+ /* Program the number of descriptors as MAX_THRESHOLD and half of
+ * its size for the hysteresis trigger
+ */
+ tdma_writel(priv, ring->size |
+ 1 << RING_HYST_THRESH_SHIFT,
+ TDMA_DESC_RING_MAX_HYST(index));
+
+ /* Enable the ring queue in the arbiter */
+ reg = tdma_readl(priv, TDMA_TIER1_ARB_0_QUEUE_EN);
+ reg |= (1 << index);
+ tdma_writel(priv, reg, TDMA_TIER1_ARB_0_QUEUE_EN);
+
+ napi_enable(&ring->napi);
+
+ netif_dbg(priv, hw, priv->netdev,
+ "TDMA cfg, size=%d, desc_cpu=%p\n",
+ ring->size, ring->desc_cpu);
+
+ return 0;
+}
+
+static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
+ unsigned int index)
+{
+ struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
+ struct device *kdev = &priv->pdev->dev;
+ u32 reg;
+
+ /* Caller should stop the TDMA engine */
+ reg = tdma_readl(priv, TDMA_STATUS);
+ if (!(reg & TDMA_DISABLED))
+ netdev_warn(priv->netdev, "TDMA not stopped!\n");
+
+ napi_disable(&ring->napi);
+ netif_napi_del(&ring->napi);
+
+ bcm_sysport_tx_reclaim(priv, ring);
+
+ kfree(ring->cbs);
+ ring->cbs = NULL;
+
+ if (ring->desc_dma) {
+ dma_free_coherent(kdev, sizeof(struct dma_desc), ring->desc_cpu,
+ ring->desc_dma);
+ ring->desc_dma = 0;
+ }
+ ring->size = 0;
+ ring->alloc_size = 0;
+
+ netif_dbg(priv, hw, priv->netdev, "TDMA fini done\n");
+}
+
+/* RDMA helper */
+static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = rdma_readl(priv, RDMA_CONTROL);
+ if (enable)
+ reg |= RDMA_EN;
+ else
+ reg &= ~RDMA_EN;
+ rdma_writel(priv, reg, RDMA_CONTROL);
+
+ /* Poll for RDMA enable/disable completion */
+ do {
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!!(reg & RDMA_DISABLED) == !enable)
+ return 0;
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+
+ netdev_err(priv->netdev, "timeout waiting for RDMA to finish\n");
+
+ return -ETIMEDOUT;
+}
+
+/* TDMA helper */
+static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = tdma_readl(priv, TDMA_CONTROL);
+ if (enable)
+ reg |= TDMA_EN;
+ else
+ reg &= ~TDMA_EN;
+ tdma_writel(priv, reg, TDMA_CONTROL);
+
+ /* Poll for TDMA enable/disable completion */
+ do {
+ reg = tdma_readl(priv, TDMA_STATUS);
+ if (!!(reg & TDMA_DISABLED) == !enable)
+ return 0;
+
+ usleep_range(1000, 2000);
+ } while (timeout-- > 0);
+
+ netdev_err(priv->netdev, "timeout waiting for TDMA to finish\n");
+
+ return -ETIMEDOUT;
+}
+
+static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
+{
+ u32 reg;
+ int ret;
+
+ /* Initialize SW view of the RX ring */
+ priv->num_rx_bds = NUM_RX_DESC;
+ priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
+ priv->rx_bd_assign_ptr = priv->rx_bds;
+ priv->rx_bd_assign_index = 0;
+ priv->rx_c_index = 0;
+ priv->rx_read_ptr = 0;
+ priv->rx_cbs = kzalloc(priv->num_rx_bds *
+ sizeof(struct bcm_sysport_cb), GFP_KERNEL);
+ if (!priv->rx_cbs) {
+ netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
+ return -ENOMEM;
+ }
+
+ ret = bcm_sysport_alloc_rx_bufs(priv);
+ if (ret) {
+ netif_err(priv, hw, priv->netdev, "SKB allocation failed\n");
+ return ret;
+ }
+
+ /* Initialize HW, ensure RDMA is disabled */
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!(reg & RDMA_DISABLED))
+ rdma_enable_set(priv, 0);
+
+ rdma_writel(priv, 0, RDMA_WRITE_PTR_LO);
+ rdma_writel(priv, 0, RDMA_WRITE_PTR_HI);
+ rdma_writel(priv, 0, RDMA_PROD_INDEX);
+ rdma_writel(priv, 0, RDMA_CONS_INDEX);
+ rdma_writel(priv, priv->num_rx_bds << RDMA_RING_SIZE_SHIFT |
+ RX_BUF_LENGTH, RDMA_RING_BUF_SIZE);
+ /* Operate the queue in ring mode */
+ rdma_writel(priv, 0, RDMA_START_ADDR_HI);
+ rdma_writel(priv, 0, RDMA_START_ADDR_LO);
+ rdma_writel(priv, 0, RDMA_END_ADDR_HI);
+ rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
+
+ rdma_writel(priv, 1, RDMA_MBDONE_INTR);
+
+ netif_dbg(priv, hw, priv->netdev,
+ "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
+ priv->num_rx_bds, priv->rx_bds);
+
+ return 0;
+}
+
+static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
+{
+ struct bcm_sysport_cb *cb;
+ unsigned int i;
+ u32 reg;
+
+ /* Caller should ensure RDMA is disabled */
+ reg = rdma_readl(priv, RDMA_STATUS);
+ if (!(reg & RDMA_DISABLED))
+ netdev_warn(priv->netdev, "RDMA not stopped!\n");
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[i];
+ if (dma_unmap_addr(cb, dma_addr))
+ dma_unmap_single(&priv->pdev->dev,
+ dma_unmap_addr(cb, dma_addr),
+ RX_BUF_LENGTH, DMA_FROM_DEVICE);
+ bcm_sysport_free_cb(cb);
+ }
+
+ kfree(priv->rx_cbs);
+ priv->rx_cbs = NULL;
+
+ netif_dbg(priv, hw, priv->netdev, "RDMA fini done\n");
+}
+
+static void bcm_sysport_set_rx_mode(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ reg = umac_readl(priv, UMAC_CMD);
+ if (dev->flags & IFF_PROMISC)
+ reg |= CMD_PROMISC;
+ else
+ reg &= ~CMD_PROMISC;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ /* No support for ALLMULTI */
+ if (dev->flags & IFF_ALLMULTI)
+ return;
+}
+
+static inline void umac_enable_set(struct bcm_sysport_priv *priv,
+ unsigned int enable)
+{
+ u32 reg;
+
+ reg = umac_readl(priv, UMAC_CMD);
+ if (enable)
+ reg |= CMD_RX_EN | CMD_TX_EN;
+ else
+ reg &= ~(CMD_RX_EN | CMD_TX_EN);
+ umac_writel(priv, reg, UMAC_CMD);
+
+ /* UniMAC stops on a packet boundary, wait for a full-sized packet
+ * to be processed (1 msec).
+ */
+ if (enable == 0)
+ usleep_range(1000, 2000);
+}
+
+static inline int umac_reset(struct bcm_sysport_priv *priv)
+{
+ unsigned int timeout = 0;
+ u32 reg;
+
+ umac_writel(priv, 0, UMAC_CMD);
+ while (timeout++ < 1000) {
+ reg = umac_readl(priv, UMAC_CMD);
+ if (!(reg & CMD_SW_RESET))
+ return 0;
+
+ udelay(1);
+ }
+
+ dev_err(&priv->pdev->dev,
+ "timeout waiting for MAC to come out of reset\n");
+ return -ETIMEDOUT;
+}
+
+static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
+ unsigned char *addr)
+{
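+ /* Example (illustrative): 00:10:18:aa:bb:cc programs
+ * UMAC_MAC0 = 0x001018aa and UMAC_MAC1 = 0x0000bbcc.
+ */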
+ umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | addr[3], UMAC_MAC0);
+ umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+}
+
+static void topctrl_flush(struct bcm_sysport_priv *priv)
+{
+ topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
+ topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
+ mdelay(1);
+ topctrl_writel(priv, 0, RX_FLUSH_CNTL);
+ topctrl_writel(priv, 0, TX_FLUSH_CNTL);
+}
+
+static int bcm_sysport_open(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+ int ret;
+
+ /* Reset UniMAC */
+ ret = umac_reset(priv);
+ if (ret) {
+ netdev_err(dev, "UniMAC reset failed\n");
+ return ret;
+ }
+
+ /* Flush TX and RX FIFOs at TOPCTRL level */
+ topctrl_flush(priv);
+
+ /* Disable the UniMAC RX/TX */
+ umac_enable_set(priv, 0);
+
+ /* Enable RBUF 2-byte alignment and Receive Status Block */
+ reg = rbuf_readl(priv, RBUF_CONTROL);
+ reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+ rbuf_writel(priv, reg, RBUF_CONTROL);
+
+ /* Set maximum frame length */
+ umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+ /* Set MAC address */
+ umac_set_hw_addr(priv, dev->dev_addr);
+
+ /* Read CRC forward */
+ priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+
+ priv->phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
+ 0, priv->phy_interface);
+ if (!priv->phydev) {
+ netdev_err(dev, "could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ /* Reset housekeeping link status */
+ priv->old_duplex = -1;
+ priv->old_link = -1;
+ priv->old_pause = -1;
+
+ /* mask all interrupts and request them */
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+ ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "failed to request RX interrupt\n");
+ goto out_phy_disconnect;
+ }
+
+ ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "failed to request TX interrupt\n");
+ goto out_free_irq0;
+ }
+
+ /* Initialize both hardware and software ring */
+ for (i = 0; i < dev->num_tx_queues; i++) {
+ ret = bcm_sysport_init_tx_ring(priv, i);
+ if (ret) {
+ netdev_err(dev, "failed to initialize TX ring %d\n",
+ i);
+ goto out_free_tx_ring;
+ }
+ }
+
+ /* Initialize linked-list */
+ tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
+
+ /* Initialize RX ring */
+ ret = bcm_sysport_init_rx_ring(priv);
+ if (ret) {
+ netdev_err(dev, "failed to initialize RX ring\n");
+ goto out_free_rx_ring;
+ }
+
+ /* Turn on RDMA */
+ ret = rdma_enable_set(priv, 1);
+ if (ret)
+ goto out_free_rx_ring;
+
+ /* Enable RX interrupt and TX ring full interrupt */
+ intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
+ /* Turn on TDMA */
+ ret = tdma_enable_set(priv, 1);
+ if (ret)
+ goto out_clear_rx_int;
+
+ /* Enable NAPI */
+ napi_enable(&priv->napi);
+
+ /* Turn on UniMAC TX/RX */
+ umac_enable_set(priv, 1);
+
+ phy_start(priv->phydev);
+
+ /* Enable TX interrupts for the 32 TXQs */
+ intrl2_1_mask_clear(priv, 0xffffffff);
+
+ /* Last call before we start the real business */
+ netif_tx_start_all_queues(dev);
+
+ return 0;
+
+out_clear_rx_int:
+ intrl2_0_mask_set(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+out_free_rx_ring:
+ bcm_sysport_fini_rx_ring(priv);
+out_free_tx_ring:
+ for (i = 0; i < dev->num_tx_queues; i++)
+ bcm_sysport_fini_tx_ring(priv, i);
+ free_irq(priv->irq1, dev);
+out_free_irq0:
+ free_irq(priv->irq0, dev);
+out_phy_disconnect:
+ phy_disconnect(priv->phydev);
+ return ret;
+}
+
+static int bcm_sysport_stop(struct net_device *dev)
+{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ unsigned int i;
+ u32 reg;
+ int ret;
+
+ /* stop all software from updating hardware */
+ netif_tx_stop_all_queues(dev);
+ napi_disable(&priv->napi);
+ phy_stop(priv->phydev);
+
+ /* mask all interrupts */
+ intrl2_0_mask_set(priv, 0xffffffff);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ intrl2_1_mask_set(priv, 0xffffffff);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+
+ /* Disable UniMAC RX */
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_RX_EN;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ ret = tdma_enable_set(priv, 0);
+ if (ret) {
+ netdev_err(dev, "timeout disabling RDMA\n");
+ return ret;
+ }
+
+ /* Wait for a maximum packet size to be drained */
+ usleep_range(2000, 3000);
+
+ ret = rdma_enable_set(priv, 0);
+ if (ret) {
+ netdev_err(dev, "timeout disabling TDMA\n");
+ return ret;
+ }
+
+ /* Disable UniMAC TX */
+ reg = umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_TX_EN;
+ umac_writel(priv, reg, UMAC_CMD);
+
+ /* Free RX/TX rings SW structures */
+ for (i = 0; i < dev->num_tx_queues; i++)
+ bcm_sysport_fini_tx_ring(priv, i);
+ bcm_sysport_fini_rx_ring(priv);
+
+ free_irq(priv->irq0, dev);
+ free_irq(priv->irq1, dev);
+
+ /* Disconnect from PHY */
+ phy_disconnect(priv->phydev);
+
+ return 0;
+}
+
+static const struct ethtool_ops bcm_sysport_ethtool_ops = {
+ .get_settings = bcm_sysport_get_settings,
+ .set_settings = bcm_sysport_set_settings,
+ .get_drvinfo = bcm_sysport_get_drvinfo,
+ .get_msglevel = bcm_sysport_get_msglvl,
+ .set_msglevel = bcm_sysport_set_msglvl,
+ .get_link = ethtool_op_get_link,
+ .get_strings = bcm_sysport_get_strings,
+ .get_ethtool_stats = bcm_sysport_get_stats,
+ .get_sset_count = bcm_sysport_get_sset_count,
+};
+
+static const struct net_device_ops bcm_sysport_netdev_ops = {
+ .ndo_start_xmit = bcm_sysport_xmit,
+ .ndo_tx_timeout = bcm_sysport_tx_timeout,
+ .ndo_open = bcm_sysport_open,
+ .ndo_stop = bcm_sysport_stop,
+ .ndo_set_features = bcm_sysport_set_features,
+ .ndo_set_rx_mode = bcm_sysport_set_rx_mode,
+};
+
+#define REV_FMT "v%2x.%02x"
+
+static int bcm_sysport_probe(struct platform_device *pdev)
+{
+ struct bcm_sysport_priv *priv;
+ struct device_node *dn;
+ struct net_device *dev;
+ const void *macaddr;
+ struct resource *r;
+ u32 txq, rxq;
+ int ret;
+
+ dn = pdev->dev.of_node;
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ /* Read the Transmit/Receive Queue properties */
+ if (of_property_read_u32(dn, "systemport,num-txq", &txq))
+ txq = TDMA_NUM_RINGS;
+ if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
+ rxq = 1;
+
+ dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
+ if (!dev)
+ return -ENOMEM;
+
+ /* Initialize private members */
+ priv = netdev_priv(dev);
+
+ priv->irq0 = platform_get_irq(pdev, 0);
+ priv->irq1 = platform_get_irq(pdev, 1);
+ if (priv->irq0 <= 0 || priv->irq1 <= 0) {
+ dev_err(&pdev->dev, "invalid interrupts\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ priv->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto err;
+ }
+
+ priv->netdev = dev;
+ priv->pdev = pdev;
+
+ priv->phy_interface = of_get_phy_mode(dn);
+ /* Default to GMII interface mode */
+ if (priv->phy_interface < 0)
+ priv->phy_interface = PHY_INTERFACE_MODE_GMII;
+
+ /* In the case of a fixed PHY, the DT node associated
+ * to the PHY is the Ethernet MAC DT node.
+ */
+ if (of_phy_is_fixed_link(dn)) {
+ ret = of_phy_register_fixed_link(dn);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register fixed PHY\n");
+ goto err;
+ }
+
+ priv->phy_dn = dn;
+ }
+
+ /* Initialize netdevice members */
+ macaddr = of_get_mac_address(dn);
+ if (!macaddr || !is_valid_ether_addr(macaddr)) {
+ dev_warn(&pdev->dev, "using random Ethernet MAC\n");
+ random_ether_addr(dev->dev_addr);
+ } else {
+ ether_addr_copy(dev->dev_addr, macaddr);
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev_set_drvdata(&pdev->dev, dev);
+ dev->ethtool_ops = &bcm_sysport_ethtool_ops;
+ dev->netdev_ops = &bcm_sysport_netdev_ops;
+ netif_napi_add(dev, &priv->napi, bcm_sysport_poll, 64);
+
+ /* HW supported features, none enabled by default */
+ dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
+ /* Set the needed headroom once and for all */
+ BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
+ dev->needed_headroom += sizeof(struct bcm_tsb);
+
+ /* We are interfaced to a switch which handles the multicast
+ * filtering for us, so we do not support programming any
+ * multicast hash table in this Ethernet MAC.
+ */
+ dev->flags &= ~IFF_MULTICAST;
+
+ /* libphy will adjust the link state accordingly */
+ netif_carrier_off(dev);
+
+ ret = register_netdev(dev);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register net_device\n");
+ goto err;
+ }
+
+ priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
+ dev_info(&pdev->dev,
+ "Broadcom SYSTEMPORT" REV_FMT
+ " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+ (priv->rev >> 8) & 0xff, priv->rev & 0xff,
+ priv->base, priv->irq0, priv->irq1, txq, rxq);
+
+ return 0;
+err:
+ free_netdev(dev);
+ return ret;
+}
+
+static int bcm_sysport_remove(struct platform_device *pdev)
+{
+ struct net_device *dev = dev_get_drvdata(&pdev->dev);
+
+ /* Not much to do, ndo_close has been called
+ * and we use managed allocations
+ */
+ unregister_netdev(dev);
+ free_netdev(dev);
+ dev_set_drvdata(&pdev->dev, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id bcm_sysport_of_match[] = {
+ { .compatible = "brcm,systemport-v1.00" },
+ { .compatible = "brcm,systemport" },
+ { /* sentinel */ }
+};
+
+static struct platform_driver bcm_sysport_driver = {
+ .probe = bcm_sysport_probe,
+ .remove = bcm_sysport_remove,
+ .driver = {
+ .name = "brcm-systemport",
+ .owner = THIS_MODULE,
+ .of_match_table = bcm_sysport_of_match,
+ },
+};
+module_platform_driver(bcm_sysport_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom System Port Ethernet MAC driver");
+MODULE_ALIAS("platform:brcm-systemport");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
new file mode 100644
index 0000000..281c082
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -0,0 +1,678 @@
+/*
+ * Broadcom BCM7xxx System Port Ethernet MAC driver
+ *
+ * Copyright (C) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __BCM_SYSPORT_H
+#define __BCM_SYSPORT_H
+
+#include <linux/if_vlan.h>
+
+/* Receive/transmit descriptor format */
+#define DESC_ADDR_HI_STATUS_LEN 0x00
+#define DESC_ADDR_HI_SHIFT 0
+#define DESC_ADDR_HI_MASK 0xff
+#define DESC_STATUS_SHIFT 8
+#define DESC_STATUS_MASK 0x3ff
+#define DESC_LEN_SHIFT 18
+#define DESC_LEN_MASK 0x7fff
+#define DESC_ADDR_LO 0x04
+
+/* HW supports 40-bit addressing, hence the two 32-bit words per descriptor */
+#define DESC_SIZE (WORDS_PER_DESC * sizeof(u32))
+
+/* Default RX buffer allocation size */
+#define RX_BUF_LENGTH 2048
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(4) + FCS(4) = 1526.
+ * 1536 is a multiple of 256 bytes
+ */
+#define ENET_BRCM_TAG_LEN 4
+#define ENET_PAD 10
+#define UMAC_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+ ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
+
+/* Transmit status block */
+struct bcm_tsb {
+ u32 pcp_dei_vid;
+#define PCP_DEI_MASK 0xf
+#define VID_SHIFT 4
+#define VID_MASK 0xfff
+ u32 l4_ptr_dest_map;
+#define L4_CSUM_PTR_MASK 0x1ff
+#define L4_PTR_SHIFT 9
+#define L4_PTR_MASK 0x1ff
+#define L4_UDP (1 << 18)
+#define L4_LENGTH_VALID (1 << 19)
+#define DEST_MAP_SHIFT 20
+#define DEST_MAP_MASK 0x1ff
+};
+
+/* Receive status block uses the same
+ * definitions as the DMA descriptor
+ */
+struct bcm_rsb {
+ u32 rx_status_len;
+ u32 brcm_egress_tag;
+};
+
+/* Common Receive/Transmit status bits */
+#define DESC_L4_CSUM (1 << 7)
+#define DESC_SOP (1 << 8)
+#define DESC_EOP (1 << 9)
+
+/* Receive Status bits */
+#define RX_STATUS_UCAST 0
+#define RX_STATUS_BCAST 0x04
+#define RX_STATUS_MCAST 0x08
+#define RX_STATUS_L2_MCAST 0x0c
+#define RX_STATUS_ERR (1 << 4)
+#define RX_STATUS_OVFLOW (1 << 5)
+#define RX_STATUS_PARSE_FAIL (1 << 6)
+
+/* Transmit Status bits */
+#define TX_STATUS_VLAN_NO_ACT 0x00
+#define TX_STATUS_VLAN_PCP_TSB 0x01
+#define TX_STATUS_VLAN_QUEUE 0x02
+#define TX_STATUS_VLAN_VID_TSB 0x03
+#define TX_STATUS_OWR_CRC (1 << 2)
+#define TX_STATUS_APP_CRC (1 << 3)
+#define TX_STATUS_BRCM_TAG_NO_ACT 0
+#define TX_STATUS_BRCM_TAG_ZERO 0x10
+#define TX_STATUS_BRCM_TAG_ONE_QUEUE 0x20
+#define TX_STATUS_BRCM_TAG_ONE_TSB 0x30
+#define TX_STATUS_SKIP_BYTES (1 << 6)
+
+/* Specific register definitions */
+#define SYS_PORT_TOPCTRL_OFFSET 0
+#define REV_CNTL 0x00
+#define REV_MASK 0xffff
+
+#define RX_FLUSH_CNTL 0x04
+#define RX_FLUSH (1 << 0)
+
+#define TX_FLUSH_CNTL 0x08
+#define TX_FLUSH (1 << 0)
+
+#define MISC_CNTL 0x0c
+#define SYS_CLK_SEL (1 << 0)
+#define TDMA_EOP_SEL (1 << 1)
+
+/* Level-2 Interrupt controller offsets and defines */
+#define SYS_PORT_INTRL2_0_OFFSET 0x200
+#define SYS_PORT_INTRL2_1_OFFSET 0x240
+#define INTRL2_CPU_STATUS 0x00
+#define INTRL2_CPU_SET 0x04
+#define INTRL2_CPU_CLEAR 0x08
+#define INTRL2_CPU_MASK_STATUS 0x0c
+#define INTRL2_CPU_MASK_SET 0x10
+#define INTRL2_CPU_MASK_CLEAR 0x14
+
+/* Level-2 instance 0 interrupt bits */
+#define INTRL2_0_GISB_ERR (1 << 0)
+#define INTRL2_0_RBUF_OVFLOW (1 << 1)
+#define INTRL2_0_TBUF_UNDFLOW (1 << 2)
+#define INTRL2_0_MPD (1 << 3)
+#define INTRL2_0_BRCM_MATCH_TAG (1 << 4)
+#define INTRL2_0_RDMA_MBDONE (1 << 5)
+#define INTRL2_0_OVER_MAX_THRESH (1 << 6)
+#define INTRL2_0_BELOW_HYST_THRESH (1 << 7)
+#define INTRL2_0_FREE_LIST_EMPTY (1 << 8)
+#define INTRL2_0_TX_RING_FULL (1 << 9)
+#define INTRL2_0_DESC_ALLOC_ERR (1 << 10)
+#define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11)
+
+/* RXCHK offset and defines */
+#define SYS_PORT_RXCHK_OFFSET 0x300
+
+#define RXCHK_CONTROL 0x00
+#define RXCHK_EN (1 << 0)
+#define RXCHK_SKIP_FCS (1 << 1)
+#define RXCHK_BAD_CSUM_DIS (1 << 2)
+#define RXCHK_BRCM_TAG_EN (1 << 3)
+#define RXCHK_BRCM_TAG_MATCH_SHIFT 4
+#define RXCHK_BRCM_TAG_MATCH_MASK 0xff
+#define RXCHK_PARSE_TNL (1 << 12)
+#define RXCHK_VIOL_EN (1 << 13)
+#define RXCHK_VIOL_DIS (1 << 14)
+#define RXCHK_INCOM_PKT (1 << 15)
+#define RXCHK_V6_DUPEXT_EN (1 << 16)
+#define RXCHK_V6_DUPEXT_DIS (1 << 17)
+#define RXCHK_ETHERTYPE_DIS (1 << 18)
+#define RXCHK_L2_HDR_DIS (1 << 19)
+#define RXCHK_L3_HDR_DIS (1 << 20)
+#define RXCHK_MAC_RX_ERR_DIS (1 << 21)
+#define RXCHK_PARSE_AUTH (1 << 22)
+
+#define RXCHK_BRCM_TAG0 0x04
+#define RXCHK_BRCM_TAG(i) ((i) * RXCHK_BRCM_TAG0)
+#define RXCHK_BRCM_TAG0_MASK 0x24
+#define RXCHK_BRCM_TAG_MASK(i) ((i) * RXCHK_BRCM_TAG0_MASK)
+#define RXCHK_BRCM_TAG_MATCH_STATUS 0x44
+#define RXCHK_ETHERTYPE 0x48
+#define RXCHK_BAD_CSUM_CNTR 0x4C
+#define RXCHK_OTHER_DISC_CNTR 0x50
+
+/* TXCHCK offsets and defines */
+#define SYS_PORT_TXCHK_OFFSET 0x380
+#define TXCHK_PKT_RDY_THRESH 0x00
+
+/* Receive buffer offset and defines */
+#define SYS_PORT_RBUF_OFFSET 0x400
+
+#define RBUF_CONTROL 0x00
+#define RBUF_RSB_EN (1 << 0)
+#define RBUF_4B_ALGN (1 << 1)
+#define RBUF_BRCM_TAG_STRIP (1 << 2)
+#define RBUF_BAD_PKT_DISC (1 << 3)
+#define RBUF_RESUME_THRESH_SHIFT 4
+#define RBUF_RESUME_THRESH_MASK 0xff
+#define RBUF_OK_TO_SEND_SHIFT 12
+#define RBUF_OK_TO_SEND_MASK 0xff
+#define RBUF_CRC_REPLACE (1 << 20)
+#define RBUF_OK_TO_SEND_MODE (1 << 21)
+#define RBUF_RSB_SWAP (1 << 22)
+#define RBUF_ACPI_EN (1 << 23)
+
+#define RBUF_PKT_RDY_THRESH 0x04
+
+#define RBUF_STATUS 0x08
+#define RBUF_WOL_MODE (1 << 0)
+#define RBUF_MPD (1 << 1)
+#define RBUF_ACPI (1 << 2)
+
+#define RBUF_OVFL_DISC_CNTR 0x0c
+#define RBUF_ERR_PKT_CNTR 0x10
+
+/* Transmit buffer offset and defines */
+#define SYS_PORT_TBUF_OFFSET 0x600
+
+#define TBUF_CONTROL 0x00
+#define TBUF_BP_EN (1 << 0)
+#define TBUF_MAX_PKT_THRESH_SHIFT 1
+#define TBUF_MAX_PKT_THRESH_MASK 0x1f
+#define TBUF_FULL_THRESH_SHIFT 8
+#define TBUF_FULL_THRESH_MASK 0x1f
+
+/* UniMAC offset and defines */
+#define SYS_PORT_UMAC_OFFSET 0x800
+
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_10 0
+#define CMD_SPEED_100 1
+#define CMD_SPEED_1000 2
+#define CMD_SPEED_2500 3
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+
+#define UMAC_MAC0 0x00c
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+
+#define UMAC_TX_FLUSH 0x334
+
+#define UMAC_MIB_START 0x400
+
+/* There is a 0xC gap between the end of RX and beginning of TX stats and then
+ * between the end of TX stats and the beginning of the RX RUNT counters
+ */
+#define UMAC_MIB_STAT_OFFSET 0xc
+
+#define UMAC_MIB_CTRL 0x580
+#define MIB_RX_CNT_RST (1 << 0)
+#define MIB_RUNT_CNT_RST (1 << 1)
+#define MIB_TX_CNT_RST (1 << 2)
+#define UMAC_MDF_CTRL 0x650
+#define UMAC_MDF_ADDR 0x654
+
+/* Receive DMA offset and defines */
+#define SYS_PORT_RDMA_OFFSET 0x2000
+
+#define RDMA_CONTROL 0x1000
+#define RDMA_EN (1 << 0)
+#define RDMA_RING_CFG (1 << 1)
+#define RDMA_DISC_EN (1 << 2)
+#define RDMA_BUF_DATA_OFFSET_SHIFT 4
+#define RDMA_BUF_DATA_OFFSET_MASK 0x3ff
+
+#define RDMA_STATUS 0x1004
+#define RDMA_DISABLED (1 << 0)
+#define RDMA_DESC_RAM_INIT_BUSY (1 << 1)
+#define RDMA_BP_STATUS (1 << 2)
+
+#define RDMA_SCB_BURST_SIZE 0x1008
+
+#define RDMA_RING_BUF_SIZE 0x100c
+#define RDMA_RING_SIZE_SHIFT 16
+
+#define RDMA_WRITE_PTR_HI 0x1010
+#define RDMA_WRITE_PTR_LO 0x1014
+#define RDMA_PROD_INDEX 0x1018
+#define RDMA_PROD_INDEX_MASK 0xffff
+
+#define RDMA_CONS_INDEX 0x101c
+#define RDMA_CONS_INDEX_MASK 0xffff
+
+#define RDMA_START_ADDR_HI 0x1020
+#define RDMA_START_ADDR_LO 0x1024
+#define RDMA_END_ADDR_HI 0x1028
+#define RDMA_END_ADDR_LO 0x102c
+
+#define RDMA_MBDONE_INTR 0x1030
+#define RDMA_INTR_THRESH_MASK 0xff
+#define RDMA_TIMEOUT_SHIFT 16
+#define RDMA_TIMEOUT_MASK 0xffff
+
+#define RDMA_XON_XOFF_THRESH 0x1034
+#define RDMA_XON_XOFF_THRESH_MASK 0xffff
+#define RDMA_XOFF_THRESH_SHIFT 16
+
+#define RDMA_READ_PTR_HI 0x1038
+#define RDMA_READ_PTR_LO 0x103c
+
+#define RDMA_OVERRIDE 0x1040
+#define RDMA_LE_MODE (1 << 0)
+#define RDMA_REG_MODE (1 << 1)
+
+#define RDMA_TEST 0x1044
+#define RDMA_TP_OUT_SEL (1 << 0)
+#define RDMA_MEM_SEL (1 << 1)
+
+#define RDMA_DEBUG 0x1048
+
+/* Transmit DMA offset and defines */
+#define TDMA_NUM_RINGS 32 /* rings = queues */
+#define TDMA_PORT_SIZE DESC_SIZE /* two 32-bit words */
+
+#define SYS_PORT_TDMA_OFFSET 0x4000
+#define TDMA_WRITE_PORT_OFFSET 0x0000
+#define TDMA_WRITE_PORT_HI(i) (TDMA_WRITE_PORT_OFFSET + \
+ (i) * TDMA_PORT_SIZE)
+#define TDMA_WRITE_PORT_LO(i) (TDMA_WRITE_PORT_OFFSET + \
+ sizeof(u32) + (i) * TDMA_PORT_SIZE)
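+
+/* Example (illustrative): with TDMA_PORT_SIZE = 8, ring 1's write port
+ * registers sit at offsets 0x08 (HI) and 0x0c (LO) from the TDMA base.
+ */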
+
+#define TDMA_READ_PORT_OFFSET (TDMA_WRITE_PORT_OFFSET + \
+ (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_HI(i) (TDMA_READ_PORT_OFFSET + \
+ (i) * TDMA_PORT_SIZE)
+#define TDMA_READ_PORT_LO(i) (TDMA_READ_PORT_OFFSET + \
+ sizeof(u32) + (i) * TDMA_PORT_SIZE)
+
+#define TDMA_READ_PORT_CMD_OFFSET (TDMA_READ_PORT_OFFSET + \
+ (TDMA_NUM_RINGS * TDMA_PORT_SIZE))
+#define TDMA_READ_PORT_CMD(i) (TDMA_READ_PORT_CMD_OFFSET + \
+ (i) * sizeof(u32))
+
+#define TDMA_DESC_RING_00_BASE (TDMA_READ_PORT_CMD_OFFSET + \
+ (TDMA_NUM_RINGS * sizeof(u32)))
+
+/* Register offsets and defines relative to a specific ring number */
+#define RING_HEAD_TAIL_PTR 0x00
+#define RING_HEAD_MASK 0x7ff
+#define RING_TAIL_SHIFT 11
+#define RING_TAIL_MASK 0x7ff
+#define RING_FLUSH (1 << 24)
+#define RING_EN (1 << 25)
+
+#define RING_COUNT 0x04
+#define RING_COUNT_MASK 0x7ff
+#define RING_BUFF_DONE_SHIFT 11
+#define RING_BUFF_DONE_MASK 0x7ff
+
+#define RING_MAX_HYST 0x08
+#define RING_MAX_THRESH_MASK 0x7ff
+#define RING_HYST_THRESH_SHIFT 11
+#define RING_HYST_THRESH_MASK 0x7ff
+
+#define RING_INTR_CONTROL 0x0c
+#define RING_INTR_THRESH_MASK 0x7ff
+#define RING_EMPTY_INTR_EN (1 << 15)
+#define RING_TIMEOUT_SHIFT 16
+#define RING_TIMEOUT_MASK 0xffff
+
+#define RING_PROD_CONS_INDEX 0x10
+#define RING_PROD_INDEX_MASK 0xffff
+#define RING_CONS_INDEX_SHIFT 16
+#define RING_CONS_INDEX_MASK 0xffff
+
+#define RING_MAPPING 0x14
+#define RING_QID_MASK 0x3
+#define RING_PORT_ID_SHIFT 3
+#define RING_PORT_ID_MASK 0x7
+#define RING_IGNORE_STATUS (1 << 6)
+#define RING_FAILOVER_EN (1 << 7)
+#define RING_CREDIT_SHIFT 8
+#define RING_CREDIT_MASK 0xffff
+
+#define RING_PCP_DEI_VID 0x18
+#define RING_VID_MASK 0x7ff
+#define RING_DEI (1 << 12)
+#define RING_PCP_SHIFT 13
+#define RING_PCP_MASK 0x7
+#define RING_PKT_SIZE_ADJ_SHIFT 16
+#define RING_PKT_SIZE_ADJ_MASK 0xf
+
+#define TDMA_DESC_RING_SIZE 28
+
+/* Definition of a given TX ring base address */
+#define TDMA_DESC_RING_BASE(i) (TDMA_DESC_RING_00_BASE + \
+ ((i) * TDMA_DESC_RING_SIZE))
+
+/* Ring indexed register addresses */
+#define TDMA_DESC_RING_HEAD_TAIL_PTR(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_HEAD_TAIL_PTR)
+#define TDMA_DESC_RING_COUNT(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_COUNT)
+#define TDMA_DESC_RING_MAX_HYST(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_MAX_HYST)
+#define TDMA_DESC_RING_INTR_CONTROL(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_INTR_CONTROL)
+#define TDMA_DESC_RING_PROD_CONS_INDEX(i) \
+ (TDMA_DESC_RING_BASE(i) + \
+ RING_PROD_CONS_INDEX)
+#define TDMA_DESC_RING_MAPPING(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_MAPPING)
+#define TDMA_DESC_RING_PCP_DEI_VID(i) (TDMA_DESC_RING_BASE(i) + \
+ RING_PCP_DEI_VID)
+
+#define TDMA_CONTROL 0x600
+#define TDMA_EN (1 << 0)
+#define TSB_EN (1 << 1)
+#define TSB_SWAP (1 << 2)
+#define ACB_ALGO (1 << 3)
+#define BUF_DATA_OFFSET_SHIFT 4
+#define BUF_DATA_OFFSET_MASK 0x3ff
+#define VLAN_EN (1 << 14)
+#define SW_BRCM_TAG (1 << 15)
+#define WNC_KPT_SIZE_UPDATE (1 << 16)
+#define SYNC_PKT_SIZE (1 << 17)
+#define ACH_TXDONE_DELAY_SHIFT 18
+#define ACH_TXDONE_DELAY_MASK 0xff
+
+#define TDMA_STATUS 0x604
+#define TDMA_DISABLED (1 << 0)
+#define TDMA_LL_RAM_INIT_BUSY (1 << 1)
+
+#define TDMA_SCB_BURST_SIZE 0x608
+#define TDMA_OVER_MAX_THRESH_STATUS 0x60c
+#define TDMA_OVER_HYST_THRESH_STATUS 0x610
+#define TDMA_TPID 0x614
+
+#define TDMA_FREE_LIST_HEAD_TAIL_PTR 0x618
+#define TDMA_FREE_HEAD_MASK 0x7ff
+#define TDMA_FREE_TAIL_SHIFT 11
+#define TDMA_FREE_TAIL_MASK 0x7ff
+
+#define TDMA_FREE_LIST_COUNT 0x61c
+#define TDMA_FREE_LIST_COUNT_MASK 0x7ff
+
+#define TDMA_TIER2_ARB_CTRL 0x620
+#define TDMA_ARB_MODE_RR 0
+#define TDMA_ARB_MODE_WEIGHT_RR 0x1
+#define TDMA_ARB_MODE_STRICT 0x2
+#define TDMA_ARB_MODE_DEFICIT_RR 0x3
+#define TDMA_CREDIT_SHIFT 4
+#define TDMA_CREDIT_MASK 0xffff
+
+#define TDMA_TIER1_ARB_0_CTRL 0x624
+#define TDMA_ARB_EN (1 << 0)
+
+#define TDMA_TIER1_ARB_0_QUEUE_EN 0x628
+#define TDMA_TIER1_ARB_1_CTRL 0x62c
+#define TDMA_TIER1_ARB_1_QUEUE_EN 0x630
+#define TDMA_TIER1_ARB_2_CTRL 0x634
+#define TDMA_TIER1_ARB_2_QUEUE_EN 0x638
+#define TDMA_TIER1_ARB_3_CTRL 0x63c
+#define TDMA_TIER1_ARB_3_QUEUE_EN 0x640
+
+#define TDMA_SCB_ENDIAN_OVERRIDE 0x644
+#define TDMA_LE_MODE (1 << 0)
+#define TDMA_REG_MODE (1 << 1)
+
+#define TDMA_TEST 0x648
+#define TDMA_TP_OUT_SEL (1 << 0)
+#define TDMA_MEM_TM (1 << 1)
+
+#define TDMA_DEBUG 0x64c
+
+/* Transmit/Receive descriptor */
+struct dma_desc {
+ u32 addr_status_len;
+ u32 addr_lo;
+};
+
+/* Number of Receive hardware descriptor words */
+#define NUM_HW_RX_DESC_WORDS 1024
+/* Real number of usable descriptors */
+#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
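+/* e.g. 1024 words / 2 words per descriptor = 512 usable RX descriptors */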
+
+/* Internal linked-list RAM has up to 1536 entries */
+#define NUM_TX_DESC 1536
+
+#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
+
+/* Rx/Tx common counter group */
+struct bcm_sysport_pkt_counters {
+ u32 cnt_64; /* RO Received/Transmitted 64 bytes packet */
+ u32 cnt_127; /* RO Rx/Tx 65-127 bytes packet */
+ u32 cnt_255; /* RO Rx/Tx 128-255 bytes packet */
+ u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */
+ u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */
+ u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */
+ u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */
+ u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet */
+ u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet */
+ u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet */
+};
+
+/* RSV, Receive Status Vector */
+struct bcm_sysport_rx_counters {
+ struct bcm_sysport_pkt_counters pkt_cnt;
+ u32 pkt; /* RO (0x428) Received pkt count */
+ u32 bytes; /* RO Received byte count */
+ u32 mca; /* RO # of Received multicast pkt */
+ u32 bca; /* RO # of Received broadcast pkt */
+ u32 fcs; /* RO # of Received FCS error */
+ u32 cf; /* RO # of Received control frame pkt */
+ u32 pf; /* RO # of Received pause frame pkt */
+ u32 uo; /* RO # of unknown op code pkt */
+ u32 aln; /* RO # of alignment error count */
+ u32 flr; /* RO # of frame length out of range count */
+ u32 cde; /* RO # of code error pkt */
+ u32 fcr; /* RO # of carrier sense error pkt */
+ u32 ovr; /* RO # of oversize pkt */
+ u32 jbr; /* RO # of jabber count */
+ u32 mtue; /* RO # of MTU error pkt */
+ u32 pok; /* RO # of Received good pkt */
+ u32 uc; /* RO # of unicast pkt */
+ u32 ppp; /* RO # of PPP pkt */
+ u32 rcrc; /* RO (0x470) # of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcm_sysport_tx_counters {
+ struct bcm_sysport_pkt_counters pkt_cnt;
+ u32 pkts; /* RO (0x4a8) Transmitted pkt */
+ u32 mca; /* RO # of xmited multicast pkt */
+ u32 bca; /* RO # of xmited broadcast pkt */
+ u32 pf; /* RO # of xmited pause frame count */
+ u32 cf; /* RO # of xmited control frame count */
+ u32 fcs; /* RO # of xmited FCS error count */
+ u32 ovr; /* RO # of xmited oversize pkt */
+ u32 drf; /* RO # of xmited deferral pkt */
+ u32 edf; /* RO # of xmited excessive deferral pkt */
+ u32 scl; /* RO # of xmited single collision pkt */
+ u32 mcl; /* RO # of xmited multiple collision pkt */
+ u32 lcl; /* RO # of xmited late collision pkt */
+ u32 ecl; /* RO # of xmited excessive collision pkt */
+ u32 frg; /* RO # of xmited fragments pkt */
+ u32 ncl; /* RO # of xmited total collision count */
+ u32 jbr; /* RO # of xmited jabber count */
+ u32 bytes; /* RO # of xmited byte count */
+ u32 pok; /* RO # of xmited good pkt */
+ u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
+};
+
+struct bcm_sysport_mib {
+ struct bcm_sysport_rx_counters rx;
+ struct bcm_sysport_tx_counters tx;
+ u32 rx_runt_cnt;
+ u32 rx_runt_fcs;
+ u32 rx_runt_fcs_align;
+ u32 rx_runt_bytes;
+ u32 rxchk_bad_csum;
+ u32 rxchk_other_pkt_disc;
+ u32 rbuf_ovflow_cnt;
+ u32 rbuf_err_cnt;
+};
+
+/* HW maintains a large list of counters */
+enum bcm_sysport_stat_type {
+ BCM_SYSPORT_STAT_NETDEV = -1,
+ BCM_SYSPORT_STAT_MIB_RX,
+ BCM_SYSPORT_STAT_MIB_TX,
+ BCM_SYSPORT_STAT_RUNT,
+ BCM_SYSPORT_STAT_RXCHK,
+ BCM_SYSPORT_STAT_RBUF,
+};
+
+/* Macros to help define ethtool statistics */
+#define STAT_NETDEV(m) { \
+ .stat_string = __stringify(m), \
+ .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+ .stat_offset = offsetof(struct net_device_stats, m), \
+ .type = BCM_SYSPORT_STAT_NETDEV, \
+}
+
+#define STAT_MIB(str, m, _type) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = _type, \
+}
+
+#define STAT_MIB_RX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_RX)
+#define STAT_MIB_TX(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_MIB_TX)
+#define STAT_RUNT(str, m) STAT_MIB(str, m, BCM_SYSPORT_STAT_RUNT)
+
+#define STAT_RXCHK(str, m, ofs) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = BCM_SYSPORT_STAT_RXCHK, \
+ .reg_offset = ofs, \
+}
+
+#define STAT_RBUF(str, m, ofs) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcm_sysport_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcm_sysport_priv, m), \
+ .type = BCM_SYSPORT_STAT_RBUF, \
+ .reg_offset = ofs, \
+}
+
+struct bcm_sysport_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_sizeof;
+ int stat_offset;
+ enum bcm_sysport_stat_type type;
+ /* reg offset from UMAC base for misc counters */
+ u16 reg_offset;
+};
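+
+/* A hypothetical table entry built with the macros above pairs an
+ * ethtool string with its backing storage, e.g.:
+ *
+ *   static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
+ *           STAT_NETDEV(rx_packets),
+ *           STAT_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+ *           STAT_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+ *   };
+ */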
+
+/* Software housekeeping helper structure */
+struct bcm_sysport_cb {
+ struct sk_buff *skb; /* SKB for RX packets */
+ void __iomem *bd_addr; /* Buffer descriptor PHYS addr */
+
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* Software view of the TX ring */
+struct bcm_sysport_tx_ring {
+ spinlock_t lock; /* Ring lock for tx reclaim/xmit */
+ struct napi_struct napi; /* NAPI per tx queue */
+ dma_addr_t desc_dma; /* DMA cookie */
+ unsigned int index; /* Ring index */
+ unsigned int size; /* Ring current size */
+ unsigned int alloc_size; /* Ring one-time allocated size */
+ unsigned int desc_count; /* Number of descriptors */
+ unsigned int curr_desc; /* Current descriptor */
+ unsigned int c_index; /* Last consumer index */
+ unsigned int p_index; /* Current producer index */
+ struct bcm_sysport_cb *cbs; /* Transmit control blocks */
+ struct dma_desc *desc_cpu; /* CPU view of the descriptor */
+ struct bcm_sysport_priv *priv; /* private context backpointer */
+};
+
+/* Driver private structure */
+struct bcm_sysport_priv {
+ void __iomem *base;
+ u32 irq0_stat;
+ u32 irq0_mask;
+ u32 irq1_stat;
+ u32 irq1_mask;
+ struct napi_struct napi ____cacheline_aligned;
+ struct net_device *netdev;
+ struct platform_device *pdev;
+ int irq0;
+ int irq1;
+
+ /* Transmit rings */
+ struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
+
+ /* Receive queue */
+ void __iomem *rx_bds;
+ void __iomem *rx_bd_assign_ptr;
+ unsigned int rx_bd_assign_index;
+ struct bcm_sysport_cb *rx_cbs;
+ unsigned int num_rx_bds;
+ unsigned int rx_read_ptr;
+ unsigned int rx_c_index;
+
+ /* PHY device */
+ struct device_node *phy_dn;
+ struct phy_device *phydev;
+ phy_interface_t phy_interface;
+ int old_pause;
+ int old_link;
+ int old_duplex;
+
+ /* Misc fields */
+ unsigned int rx_csum_en:1;
+ unsigned int tsb_en:1;
+ unsigned int crc_fwd:1;
+ u16 rev;
+
+ /* MIB related fields */
+ struct bcm_sysport_mib mib;
+
+ /* Ethtool */
+ u32 msg_enable;
+};
+#endif /* __BCM_SYSPORT_H */
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 0297a79..05c6af6 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1436,7 +1436,7 @@ static int bgmac_probe(struct bcma_device *core)
return -ENOMEM;
net_dev->netdev_ops = &bgmac_netdev_ops;
net_dev->irq = core->irq;
- SET_ETHTOOL_OPS(net_dev, &bgmac_ethtool_ops);
+ net_dev->ethtool_ops = &bgmac_ethtool_ops;
bgmac = netdev_priv(net_dev);
bgmac->net_dev = net_dev;
bgmac->core = core;
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 0ab8370..67d2b00 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6916,8 +6916,8 @@ bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
}
else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
spin_unlock_bh(&bp->phy_lock);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 4d8f8ab..4cab09d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dd57c7c..47c5814 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -906,6 +906,18 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
bd_prod = RX_BD(bd_prod);
bd_cons = RX_BD(bd_cons);
+ /* A rmb() is required to ensure that the CQE is not read
+ * before it is written by the adapter DMA. PCI ordering
+ * rules will make sure the other fields are written before
+ * the marker at the end of struct eth_fast_path_rx_cqe
+ * but without rmb() a weakly ordered processor can process
+ * stale data. Without the barrier TPA state-machine might
+ * enter inconsistent state and kernel stack might be
+ * provided with incorrect packet description - these lead
+ * to various kernel crashes.
+ */
+ rmb();
+
cqe_fp_flags = cqe_fp->type_error_flags;
cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 3448cc0..571427c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index 97ea542..51a952c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Dmitry Kravkov
*
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
index 804b8f6..c6939ec 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.h
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Dmitry Kravkov
*
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index b6de05e..bd0600c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -3316,7 +3316,7 @@ static u32 bnx2x_get_rxfh_indir_size(struct net_device *dev)
return T_ETH_INDIRECTION_TABLE_SIZE;
}
-static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
+static int bnx2x_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
struct bnx2x *bp = netdev_priv(dev);
u8 ind_table[T_ETH_INDIRECTION_TABLE_SIZE] = {0};
@@ -3340,14 +3340,15 @@ static int bnx2x_get_rxfh_indir(struct net_device *dev, u32 *indir)
return 0;
}
-static int bnx2x_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key)
{
struct bnx2x *bp = netdev_priv(dev);
size_t i;
for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) {
/*
- * The same as in bnx2x_get_rxfh_indir: we can't use a memcpy()
+ * The same as in bnx2x_get_rxfh: we can't use a memcpy()
* as an internal storage of an indirection table is a u8 array
* while indir->ring_index points to an array of u32.
*
@@ -3471,8 +3472,8 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_rxnfc = bnx2x_get_rxnfc,
.set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
- .get_rxfh_indir = bnx2x_get_rxfh_indir,
- .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_rxfh = bnx2x_get_rxfh,
+ .set_rxfh = bnx2x_set_rxfh,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
.get_module_info = bnx2x_get_module_info,
@@ -3498,16 +3499,14 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.get_rxnfc = bnx2x_get_rxnfc,
.set_rxnfc = bnx2x_set_rxnfc,
.get_rxfh_indir_size = bnx2x_get_rxfh_indir_size,
- .get_rxfh_indir = bnx2x_get_rxfh_indir,
- .set_rxfh_indir = bnx2x_set_rxfh_indir,
+ .get_rxfh = bnx2x_get_rxfh,
+ .set_rxfh = bnx2x_set_rxfh,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
};
void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
{
- if (IS_PF(bp))
- SET_ETHTOOL_OPS(netdev, &bnx2x_ethtool_ops);
- else /* vf */
- SET_ETHTOOL_OPS(netdev, &bnx2x_vf_ethtool_ops);
+ netdev->ethtool_ops = (IS_PF(bp)) ?
+ &bnx2x_ethtool_ops : &bnx2x_vf_ethtool_ops;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
index f572ae1..8aafd9b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_file_hdr.h
@@ -6,8 +6,8 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
* Based on the original idea of John Wright <john.wright@hp.com>.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
index c2dfea7..bd90e50 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init.h
@@ -7,9 +7,9 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
- * Modified by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Modified by: Vladislav Zolotarov
*/
#ifndef BNX2X_INIT_H
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
index 8ab0dd9..5669ed2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_init_ops.h
@@ -8,8 +8,8 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Vladislav Zolotarov <vladz@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Vladislav Zolotarov
*/
#ifndef BNX2X_INIT_OPS_H
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 9b6b3d7..53fb4fa 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -2218,7 +2218,6 @@ int bnx2x_update_pfc(struct link_params *params,
*/
u32 val;
struct bnx2x *bp = params->bp;
- int bnx2x_status = 0;
u8 bmac_loopback = (params->loopback_mode == LOOPBACK_BMAC);
if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
@@ -2232,7 +2231,7 @@ int bnx2x_update_pfc(struct link_params *params,
bnx2x_update_pfc_nig(params, vars, pfc_params);
if (!vars->link_up)
- return bnx2x_status;
+ return 0;
DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
@@ -2246,7 +2245,7 @@ int bnx2x_update_pfc(struct link_params *params,
== 0) {
DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
bnx2x_emac_enable(params, vars, 0);
- return bnx2x_status;
+ return 0;
}
if (CHIP_IS_E2(bp))
bnx2x_update_pfc_bmac2(params, vars, bmac_loopback);
@@ -2260,7 +2259,7 @@ int bnx2x_update_pfc(struct link_params *params,
val = 1;
REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
}
- return bnx2x_status;
+ return 0;
}
static int bnx2x_bmac1_enable(struct link_params *params,
@@ -3703,7 +3702,8 @@ static void bnx2x_warpcore_restart_AN_KR(struct bnx2x_phy *phy,
static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
struct link_params *params,
struct link_vars *vars) {
- u16 lane, i, cl72_ctrl, an_adv = 0;
+ u16 lane, i, cl72_ctrl, an_adv = 0, val;
+ u32 wc_lane_config;
struct bnx2x *bp = params->bp;
static struct bnx2x_reg_set reg_set[] = {
{MDIO_WC_DEVAD, MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X2, 0x7},
@@ -3822,15 +3822,27 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
/* Enable Auto-Detect to support 1G over CL37 as well */
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
-
+ wc_lane_config = REG_RD(bp, params->shmem_base +
+ offsetof(struct shmem_region, dev_info.
+ shared_hw_config.wc_lane_config));
+ bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4), &val);
/* Force cl48 sync_status LOW to avoid getting stuck in CL73
* parallel-detect loop when CL73 and CL37 are enabled.
*/
- CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
- MDIO_AER_BLOCK_AER_REG, 0);
+ val |= 1 << 11;
+
+ /* Restore Polarity settings in case it was run over by
+ * previous link owner
+ */
+ if (wc_lane_config &
+ (SHARED_HW_CFG_RX_LANE0_POL_FLIP_ENABLED << lane))
+ val |= 3 << 2;
+ else
+ val &= ~(3 << 2);
bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
- MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
- bnx2x_set_aer_mmd(params, phy);
+ MDIO_WC_REG_RX0_PCI_CTRL + (lane << 4),
+ val);
bnx2x_disable_kr2(params, vars, phy);
}
@@ -6473,7 +6485,6 @@ int bnx2x_test_link(struct link_params *params, struct link_vars *vars,
static int bnx2x_link_initialize(struct link_params *params,
struct link_vars *vars)
{
- int rc = 0;
u8 phy_index, non_ext_phy;
struct bnx2x *bp = params->bp;
/* In case of external phy existence, the line speed would be the
@@ -6546,7 +6557,7 @@ static int bnx2x_link_initialize(struct link_params *params,
NIG_STATUS_XGXS0_LINK_STATUS |
NIG_STATUS_SERDES0_LINK_STATUS |
NIG_MASK_MI_INT));
- return rc;
+ return 0;
}
static void bnx2x_int_link_reset(struct bnx2x_phy *phy,
@@ -12461,6 +12472,7 @@ static int bnx2x_avoid_link_flap(struct link_params *params,
u32 dont_clear_stat, lfa_sts;
struct bnx2x *bp = params->bp;
+ bnx2x_set_mdio_emac_per_phy(bp, params);
/* Sync the link parameters */
bnx2x_link_status_update(params, vars);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 3a8e51e..2887034 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
@@ -10053,6 +10053,24 @@ static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
#define BCM_5710_UNDI_FW_MF_VERS (0x05)
#define BNX2X_PREV_UNDI_MF_PORT(p) (BAR_TSTRORM_INTMEM + 0x150c + ((p) << 4))
#define BNX2X_PREV_UNDI_MF_FUNC(f) (BAR_TSTRORM_INTMEM + 0x184c + ((f) << 4))
+
+static bool bnx2x_prev_is_after_undi(struct bnx2x *bp)
+{
+ /* UNDI marks its presence in DORQ -
+ * it initializes CID offset for normal bell to 0x7
+ */
+ if (!(REG_RD(bp, MISC_REG_RESET_REG_1) &
+ MISC_REGISTERS_RESET_REG_1_RST_DORQ))
+ return false;
+
+ if (REG_RD(bp, DORQ_REG_NORM_CID_OFST) == 0x7) {
+ BNX2X_DEV_INFO("UNDI previously loaded\n");
+ return true;
+ }
+
+ return false;
+}
+
static bool bnx2x_prev_unload_undi_fw_supports_mf(struct bnx2x *bp)
{
u8 major, minor, version;
@@ -10302,6 +10320,10 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
BNX2X_DEV_INFO("Path is unmarked\n");
+ /* Cannot proceed with FLR if UNDI is loaded, since FW does not match */
+ if (bnx2x_prev_is_after_undi(bp))
+ goto out;
+
/* If function has FLR capabilities, and existing FW version matches
* the one required, then FLR will be sufficient to clean any residue
* left by previous driver
@@ -10322,6 +10344,7 @@ static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
BNX2X_DEV_INFO("Could not FLR\n");
+out:
/* Close the MCP request, return failure*/
rc = bnx2x_prev_mcp_done(bp);
if (!rc)
@@ -10360,19 +10383,13 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
/* close LLH filters towards the BRB */
bnx2x_set_rx_filter(&bp->link_params, 0);
- /* Check if the UNDI driver was previously loaded
- * UNDI driver initializes CID offset for normal bell to 0x7
- */
- if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
- tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
- if (tmp_reg == 0x7) {
- BNX2X_DEV_INFO("UNDI previously loaded\n");
- prev_undi = true;
- /* clear the UNDI indication */
- REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
- /* clear possible idle check errors */
- REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
- }
+ /* Check if the UNDI driver was previously loaded */
+ if (bnx2x_prev_is_after_undi(bp)) {
+ prev_undi = true;
+ /* clear the UNDI indication */
+ REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
+ /* clear possible idle check errors */
+ REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
}
if (!CHIP_IS_E1x(bp))
/* block FW from writing to host */
@@ -13283,8 +13300,8 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
netdev_reset_tc(bp->dev);
del_timer_sync(&bp->timer);
- cancel_delayed_work(&bp->sp_task);
- cancel_delayed_work(&bp->period_task);
+ cancel_delayed_work_sync(&bp->sp_task);
+ cancel_delayed_work_sync(&bp->period_task);
spin_lock_bh(&bp->stats_lock);
bp->stats_state = STATS_STATE_DISABLED;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index d725317c..b193604 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Vladislav Zolotarov
*
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 80f6c79..718ecd2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -12,7 +12,7 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Vladislav Zolotarov
*
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index faf0148..eda8583 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -12,9 +12,9 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- * Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
*
*/
#include "bnx2x.h"
@@ -1071,8 +1071,10 @@ void bnx2x_iov_init_dq(struct bnx2x *bp)
REG_WR(bp, DORQ_REG_VF_TYPE_MIN_MCID_0, 0);
REG_WR(bp, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff);
- /* set the VF doorbell threshold */
- REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 4);
+ /* set the VF doorbell threshold. This threshold represents the number
+ * of doorbells allowed in the main DORQ FIFO for a specific VF.
+ */
+ REG_WR(bp, DORQ_REG_VF_USAGE_CT_LIMIT, 64);
}
void bnx2x_iov_init_dmae(struct bnx2x *bp)
@@ -2576,7 +2578,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
ivi->vf = vfidx;
ivi->qos = 0;
- ivi->tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->max_tx_rate = 10000; /* always 10G. TBA take from link struct */
+ ivi->min_tx_rate = 0;
ivi->spoofchk = 1; /*always enabled */
if (vf->state == VF_ENABLED) {
/* mac and vlan are in vlan_mac objects */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 6929adb..96c575e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -12,9 +12,9 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- * Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
*/
#ifndef BNX2X_SRIOV_H
#define BNX2X_SRIOV_H
@@ -571,7 +571,7 @@ static inline void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
return NULL;
}
-static inline void bnx2x_vf_pci_dealloc(struct bnx2 *bp) {return 0; }
+static inline void bnx2x_vf_pci_dealloc(struct bnx2x *bp) {}
static inline int bnx2x_vf_pci_alloc(struct bnx2x *bp) {return 0; }
static inline void bnx2x_pf_set_vfs_vlan(struct bnx2x *bp) {}
static inline int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs) {return 0; }
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
index 3b75070..ca47665 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
index f358450..2beceae 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.h
@@ -6,7 +6,7 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
* Written by: Eliezer Tamir
* Based on code from Michael Chan's bnx2 driver
* UDP CSUM errata workaround by Arik Gendelman
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 784c715..d712d0d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -12,9 +12,9 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Shmulik Ravid <shmulikr@broadcom.com>
- * Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Shmulik Ravid
+ * Ariel Elior <ariel.elior@qlogic.com>
*/
#include "bnx2x.h"
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index c922b81..e21e706 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -12,8 +12,8 @@
* license other than the GPL, without Broadcom's express prior written
* consent.
*
- * Maintained by: Eilon Greenstein <eilong@broadcom.com>
- * Written by: Ariel Elior <ariele@broadcom.com>
+ * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
+ * Written by: Ariel Elior <ariel.elior@qlogic.com>
*/
#ifndef VF_PF_IF_H
#define VF_PF_IF_H
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 4dd48d2..8244e2b 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -608,6 +608,10 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
pr_err("%s: Bad type %d\n", __func__, ulp_type);
return -EINVAL;
}
+
+ if (ulp_type == CNIC_ULP_ISCSI)
+ cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+
mutex_lock(&cnic_lock);
if (rcu_dereference(cp->ulp_ops[ulp_type])) {
RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
@@ -620,9 +624,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
}
mutex_unlock(&cnic_lock);
- if (ulp_type == CNIC_ULP_ISCSI)
- cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
- else if (ulp_type == CNIC_ULP_FCOE)
+ if (ulp_type == CNIC_ULP_FCOE)
dev->fcoe_cap = NULL;
synchronize_rcu();
@@ -1039,21 +1041,17 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
struct cnic_local *cp = dev->cnic_priv;
struct cnic_uio_dev *udev;
- read_lock(&cnic_dev_lock);
list_for_each_entry(udev, &cnic_udev_list, list) {
if (udev->pdev == dev->pcidev) {
udev->dev = dev;
if (__cnic_alloc_uio_rings(udev, pages)) {
udev->dev = NULL;
- read_unlock(&cnic_dev_lock);
return -ENOMEM;
}
cp->udev = udev;
- read_unlock(&cnic_dev_lock);
return 0;
}
}
- read_unlock(&cnic_dev_lock);
udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
if (!udev)
@@ -1067,9 +1065,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
if (__cnic_alloc_uio_rings(udev, pages))
goto err_udev;
- write_lock(&cnic_dev_lock);
list_add(&udev->list, &cnic_udev_list);
- write_unlock(&cnic_dev_lock);
pci_dev_get(udev->pdev);
@@ -5624,20 +5620,27 @@ static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
{
int if_type;
- rcu_read_lock();
for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
struct cnic_ulp_ops *ulp_ops;
void *ctx;
- ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
- if (!ulp_ops || !ulp_ops->indicate_netevent)
+ mutex_lock(&cnic_lock);
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
+ lockdep_is_held(&cnic_lock));
+ if (!ulp_ops || !ulp_ops->indicate_netevent) {
+ mutex_unlock(&cnic_lock);
continue;
+ }
ctx = cp->ulp_handle[if_type];
+ set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+ mutex_unlock(&cnic_lock);
+
ulp_ops->indicate_netevent(ctx, event, vlan_id);
+
+ clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
}
- rcu_read_unlock();
}
/* netdev event handler */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 0966bd0..5ba1cfb 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2481,7 +2481,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
dev_set_drvdata(&pdev->dev, dev);
ether_addr_copy(dev->dev_addr, macaddr);
dev->watchdog_timeo = 2 * HZ;
- SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops);
+ dev->ethtool_ops = &bcmgenet_ethtool_ops;
dev->netdev_ops = &bcmgenet_netdev_ops;
netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 4608673..add8d85 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -298,6 +298,7 @@ int bcmgenet_mii_config(struct net_device *dev)
static int bcmgenet_mii_probe(struct net_device *dev)
{
struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device_node *dn = priv->pdev->dev.of_node;
struct phy_device *phydev;
unsigned int phy_flags;
int ret;
@@ -307,15 +308,19 @@ static int bcmgenet_mii_probe(struct net_device *dev)
return 0;
}
- if (priv->phy_dn)
- phydev = of_phy_connect(dev, priv->phy_dn,
- bcmgenet_mii_setup, 0,
- priv->phy_interface);
- else
- phydev = of_phy_connect_fixed_link(dev,
- bcmgenet_mii_setup,
- priv->phy_interface);
+ /* In the case of a fixed PHY, the DT node associated
+ * with the PHY is the Ethernet MAC DT node.
+ */
+ if (of_phy_is_fixed_link(dn)) {
+ ret = of_phy_register_fixed_link(dn);
+ if (ret)
+ return ret;
+
+ priv->phy_dn = dn;
+ }
+ phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup, 0,
+ priv->phy_interface);
if (!phydev) {
pr_err("could not attach to PHY\n");
return -ENODEV;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e5d95c5..df2792d 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2005-2013 Broadcom Corporation.
+ * Copyright (C) 2005-2014 Broadcom Corporation.
*
* Firmware is:
* Derived from proprietary unpublished source code,
@@ -94,10 +94,10 @@ static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
#define DRV_MODULE_NAME "tg3"
#define TG3_MAJ_NUM 3
-#define TG3_MIN_NUM 136
+#define TG3_MIN_NUM 137
#define DRV_MODULE_VERSION \
__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
-#define DRV_MODULE_RELDATE "Jan 03, 2014"
+#define DRV_MODULE_RELDATE "May 11, 2014"
#define RESET_KIND_SHUTDOWN 0
#define RESET_KIND_INIT 1
@@ -3224,7 +3224,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
return 0;
}
-#define NVRAM_CMD_TIMEOUT 10000
+#define NVRAM_CMD_TIMEOUT 100
static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
@@ -7871,9 +7871,7 @@ tg3_tso_bug_end:
return NETDEV_TX_OK;
}
-/* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
- * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
- */
+/* hard_start_xmit for all devices */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct tg3 *tp = netdev_priv(dev);
@@ -7884,6 +7882,10 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
struct tg3_napi *tnapi;
struct netdev_queue *txq;
unsigned int last;
+ struct iphdr *iph = NULL;
+ struct tcphdr *tcph = NULL;
+ __sum16 tcp_csum = 0, ip_csum = 0;
+ __be16 ip_tot_len = 0;
txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
tnapi = &tp->napi[skb_get_queue_mapping(skb)];
@@ -7915,7 +7917,6 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
mss = skb_shinfo(skb)->gso_size;
if (mss) {
- struct iphdr *iph;
u32 tcp_opt_len, hdr_len;
if (skb_cow_head(skb, 0))
@@ -7927,27 +7928,31 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
if (!skb_is_gso_v6(skb)) {
+ if (unlikely((ETH_HLEN + hdr_len) > 80) &&
+ tg3_flag(tp, TSO_BUG))
+ return tg3_tso_bug(tp, skb);
+
+ ip_csum = iph->check;
+ ip_tot_len = iph->tot_len;
iph->check = 0;
iph->tot_len = htons(mss + hdr_len);
}
- if (unlikely((ETH_HLEN + hdr_len) > 80) &&
- tg3_flag(tp, TSO_BUG))
- return tg3_tso_bug(tp, skb);
-
base_flags |= (TXD_FLAG_CPU_PRE_DMA |
TXD_FLAG_CPU_POST_DMA);
+ tcph = tcp_hdr(skb);
+ tcp_csum = tcph->check;
+
if (tg3_flag(tp, HW_TSO_1) ||
tg3_flag(tp, HW_TSO_2) ||
tg3_flag(tp, HW_TSO_3)) {
- tcp_hdr(skb)->check = 0;
+ tcph->check = 0;
base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
- } else
- tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
- iph->daddr, 0,
- IPPROTO_TCP,
- 0);
+ } else {
+ tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ 0, IPPROTO_TCP, 0);
+ }
if (tg3_flag(tp, HW_TSO_3)) {
mss |= (hdr_len & 0xc) << 12;
@@ -8047,6 +8052,18 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (would_hit_hwbug) {
tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
+ if (mss) {
+ /* If it's a TSO packet, do GSO instead of
+ * allocating and copying to a large linear SKB
+ */
+ if (ip_tot_len) {
+ iph->check = ip_csum;
+ iph->tot_len = ip_tot_len;
+ }
+ tcph->check = tcp_csum;
+ return tg3_tso_bug(tp, skb);
+ }
+
/* If the workaround fails due to memory/mapping
* failure, silently drop this packet.
*/
@@ -11876,9 +11893,9 @@ static int tg3_get_eeprom_len(struct net_device *dev)
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
struct tg3 *tp = netdev_priv(dev);
- int ret;
+ int ret, cpmu_restore = 0;
u8 *pd;
- u32 i, offset, len, b_offset, b_count;
+ u32 i, offset, len, b_offset, b_count, cpmu_val = 0;
__be32 val;
if (tg3_flag(tp, NO_NVRAM))
@@ -11890,6 +11907,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
eeprom->magic = TG3_EEPROM_MAGIC;
+ /* Override clock, link aware and link idle modes */
+ if (tg3_flag(tp, CPMU_PRESENT)) {
+ cpmu_val = tr32(TG3_CPMU_CTRL);
+ if (cpmu_val & (CPMU_CTRL_LINK_AWARE_MODE |
+ CPMU_CTRL_LINK_IDLE_MODE)) {
+ tw32(TG3_CPMU_CTRL, cpmu_val &
+ ~(CPMU_CTRL_LINK_AWARE_MODE |
+ CPMU_CTRL_LINK_IDLE_MODE));
+ cpmu_restore = 1;
+ }
+ }
+ tg3_override_clk(tp);
+
if (offset & 3) {
/* adjustments to start on required 4 byte boundary */
b_offset = offset & 3;
@@ -11900,7 +11930,7 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
}
ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
if (ret)
- return ret;
+ goto eeprom_done;
memcpy(data, ((char *)&val) + b_offset, b_count);
len -= b_count;
offset += b_count;
@@ -11912,10 +11942,20 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
for (i = 0; i < (len - (len & 3)); i += 4) {
ret = tg3_nvram_read_be32(tp, offset + i, &val);
if (ret) {
+ if (i)
+ i -= 4;
eeprom->len += i;
- return ret;
+ goto eeprom_done;
}
memcpy(pd + i, &val, 4);
+ if (need_resched()) {
+ if (signal_pending(current)) {
+ eeprom->len += i;
+ ret = -EINTR;
+ goto eeprom_done;
+ }
+ cond_resched();
+ }
}
eeprom->len += i;
@@ -11926,11 +11966,19 @@ static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
b_offset = offset + len - b_count;
ret = tg3_nvram_read_be32(tp, b_offset, &val);
if (ret)
- return ret;
+ goto eeprom_done;
memcpy(pd, &val, b_count);
eeprom->len += b_count;
}
- return 0;
+ ret = 0;
+
+eeprom_done:
+ /* Restore clock, link aware and link idle modes */
+ tg3_restore_clk(tp);
+ if (cpmu_restore)
+ tw32(TG3_CPMU_CTRL, cpmu_val);
+
+ return ret;
}
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
@@ -12484,7 +12532,7 @@ static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
return size;
}
-static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
+static int tg3_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
struct tg3 *tp = netdev_priv(dev);
int i;
@@ -12495,7 +12543,7 @@ static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
return 0;
}
-static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
+static int tg3_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key)
{
struct tg3 *tp = netdev_priv(dev);
size_t i;
@@ -14027,8 +14075,8 @@ static const struct ethtool_ops tg3_ethtool_ops = {
.get_sset_count = tg3_get_sset_count,
.get_rxnfc = tg3_get_rxnfc,
.get_rxfh_indir_size = tg3_get_rxfh_indir_size,
- .get_rxfh_indir = tg3_get_rxfh_indir,
- .set_rxfh_indir = tg3_set_rxfh_indir,
+ .get_rxfh = tg3_get_rxfh,
+ .set_rxfh = tg3_set_rxfh,
.get_channels = tg3_get_channels,
.set_channels = tg3_set_channels,
.get_ts_info = tg3_get_ts_info,
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 04321e5..461acca 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -4,7 +4,7 @@
* Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
* Copyright (C) 2001 Jeff Garzik (jgarzik@pobox.com)
* Copyright (C) 2004 Sun Microsystems Inc.
- * Copyright (C) 2007-2013 Broadcom Corporation.
+ * Copyright (C) 2007-2014 Broadcom Corporation.
*/
#ifndef _T3_H
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index f9e1508..882cad7 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -266,8 +266,8 @@ bnad_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, SPEED_10000);
cmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
cmd->transceiver = XCVR_EXTERNAL;
cmd->maxtxpkt = 0;
@@ -1137,5 +1137,5 @@ static const struct ethtool_ops bnad_ethtool_ops = {
void
bnad_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &bnad_ethtool_ops);
+ netdev->ethtool_ops = &bnad_ethtool_ops;
}
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index 521dfea..25d6b2a 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1737,7 +1737,7 @@ static int xgmac_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ndev);
ether_setup(ndev);
ndev->netdev_ops = &xgmac_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &xgmac_ethtool_ops);
+ ndev->ethtool_ops = &xgmac_ethtool_ops;
spin_lock_init(&priv->stats_lock);
INIT_WORK(&priv->tx_timeout_work, xgmac_tx_timeout_work);
diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
index 05613a8..186566b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
+++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c
@@ -580,8 +580,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, p->link_config.speed);
cmd->duplex = p->link_config.duplex;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -1100,7 +1100,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netif_napi_add(netdev, &adapter->napi, t1_poll, 64);
- SET_ETHTOOL_OPS(netdev, &t1_ethtool_ops);
+ netdev->ethtool_ops = &t1_ethtool_ops;
}
if (t1_init_sw_modules(adapter, bi) < 0) {
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 07bbb71..5d9cce0 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -1809,8 +1809,8 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, p->link_config.speed);
cmd->duplex = p->link_config.duplex;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
@@ -3291,7 +3291,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->features |= NETIF_F_HIGHDMA;
netdev->netdev_ops = &cxgb_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+ netdev->ethtool_ops = &cxgb_ethtool_ops;
}
pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
index c0a9dd5..b0cbb2b 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c
@@ -185,7 +185,7 @@ static struct net_device *get_iff_from_mac(struct adapter *adapter,
if (ether_addr_equal(dev->dev_addr, mac)) {
rcu_read_lock();
if (vlan && vlan != VLAN_VID_MASK) {
- dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), vlan);
+ dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), vlan);
} else if (netif_is_bond_slave(dev)) {
struct net_device *upper_dev;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 32db377..f503dce 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -357,11 +357,17 @@ enum {
MAX_OFLD_QSETS = 16, /* # of offload Tx/Rx queue sets */
MAX_CTRL_QUEUES = NCHAN, /* # of control Tx queues */
MAX_RDMA_QUEUES = NCHAN, /* # of streaming RDMA Rx queues */
+ MAX_RDMA_CIQS = NCHAN, /* # of RDMA concentrator IQs */
+ MAX_ISCSI_QUEUES = NCHAN, /* # of streaming iSCSI Rx queues */
};
enum {
- MAX_EGRQ = 128, /* max # of egress queues, including FLs */
- MAX_INGQ = 64 /* max # of interrupt-capable ingress queues */
+ INGQ_EXTRAS = 2, /* firmware event queue and */
+ /* forwarded interrupts */
+ MAX_EGRQ = MAX_ETH_QSETS*2 + MAX_OFLD_QSETS*2
+ + MAX_CTRL_QUEUES + MAX_RDMA_QUEUES + MAX_ISCSI_QUEUES,
+ MAX_INGQ = MAX_ETH_QSETS + MAX_OFLD_QSETS + MAX_RDMA_QUEUES
+ + MAX_RDMA_CIQS + MAX_ISCSI_QUEUES + INGQ_EXTRAS,
};
struct adapter;
@@ -538,6 +544,7 @@ struct sge {
struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
+ struct sge_ofld_rxq rdmaciq[MAX_RDMA_CIQS];
struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
struct sge_rspq intrq ____cacheline_aligned_in_smp;
@@ -548,8 +555,10 @@ struct sge {
u16 ethtxq_rover; /* Tx queue to clean up next */
u16 ofldqsets; /* # of active offload queue sets */
u16 rdmaqs; /* # of available RDMA Rx queues */
+ u16 rdmaciqs; /* # of available RDMA concentrator IQs */
u16 ofld_rxq[MAX_OFLD_QSETS];
u16 rdma_rxq[NCHAN];
+ u16 rdma_ciq[NCHAN];
u16 timer_val[SGE_NTIMERS];
u8 counter_val[SGE_NCOUNTERS];
u32 fl_pg_order; /* large page allocation size */
@@ -577,6 +586,7 @@ struct sge {
#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
+#define for_each_rdmaciq(sge, i) for (i = 0; i < (sge)->rdmaciqs; i++)
struct l2t_data;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 24e16e3..2f8d6b9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -818,12 +818,17 @@ static void name_msix_vecs(struct adapter *adap)
for_each_rdmarxq(&adap->sge, i)
snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
adap->port[0]->name, i);
+
+ for_each_rdmaciq(&adap->sge, i)
+ snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma-ciq%d",
+ adap->port[0]->name, i);
}
static int request_msix_queue_irqs(struct adapter *adap)
{
struct sge *s = &adap->sge;
- int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi_index = 2;
+ int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, rdmaciqqidx = 0;
+ int msi_index = 2;
err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
adap->msix_info[1].desc, &s->fw_evtq);
@@ -857,9 +862,21 @@ static int request_msix_queue_irqs(struct adapter *adap)
goto unwind;
msi_index++;
}
+ for_each_rdmaciq(s, rdmaciqqidx) {
+ err = request_irq(adap->msix_info[msi_index].vec,
+ t4_sge_intr_msix, 0,
+ adap->msix_info[msi_index].desc,
+ &s->rdmaciq[rdmaciqqidx].rspq);
+ if (err)
+ goto unwind;
+ msi_index++;
+ }
return 0;
unwind:
+ while (--rdmaciqqidx >= 0)
+ free_irq(adap->msix_info[--msi_index].vec,
+ &s->rdmaciq[rdmaciqqidx].rspq);
while (--rdmaqidx >= 0)
free_irq(adap->msix_info[--msi_index].vec,
&s->rdmarxq[rdmaqidx].rspq);
@@ -885,6 +902,8 @@ static void free_msix_queue_irqs(struct adapter *adap)
free_irq(adap->msix_info[msi_index++].vec, &s->ofldrxq[i].rspq);
for_each_rdmarxq(s, i)
free_irq(adap->msix_info[msi_index++].vec, &s->rdmarxq[i].rspq);
+ for_each_rdmaciq(s, i)
+ free_irq(adap->msix_info[msi_index++].vec, &s->rdmaciq[i].rspq);
}
/**
@@ -1047,7 +1066,8 @@ freeout: t4_free_sge_resources(adap);
if (msi_idx > 0)
msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
- &q->fl, uldrx_handler);
+ q->fl.size ? &q->fl : NULL,
+ uldrx_handler);
if (err)
goto freeout;
memset(&q->stats, 0, sizeof(q->stats));
@@ -1064,13 +1084,28 @@ freeout: t4_free_sge_resources(adap);
if (msi_idx > 0)
msi_idx++;
err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
- msi_idx, &q->fl, uldrx_handler);
+ msi_idx, q->fl.size ? &q->fl : NULL,
+ uldrx_handler);
if (err)
goto freeout;
memset(&q->stats, 0, sizeof(q->stats));
s->rdma_rxq[i] = q->rspq.abs_id;
}
+ for_each_rdmaciq(s, i) {
+ struct sge_ofld_rxq *q = &s->rdmaciq[i];
+
+ if (msi_idx > 0)
+ msi_idx++;
+ err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
+ msi_idx, q->fl.size ? &q->fl : NULL,
+ uldrx_handler);
+ if (err)
+ goto freeout;
+ memset(&q->stats, 0, sizeof(q->stats));
+ s->rdma_ciq[i] = q->rspq.abs_id;
+ }
+
for_each_port(adap, i) {
/*
* Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
@@ -2252,12 +2287,19 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
p->port_type == FW_PORT_TYPE_FIBER_XAUI)
cmd->port = PORT_FIBRE;
- else if (p->port_type == FW_PORT_TYPE_SFP) {
- if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
- p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ else if (p->port_type == FW_PORT_TYPE_SFP ||
+ p->port_type == FW_PORT_TYPE_QSFP_10G ||
+ p->port_type == FW_PORT_TYPE_QSFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+ p->mod_type == FW_PORT_MOD_TYPE_SR ||
+ p->mod_type == FW_PORT_MOD_TYPE_ER ||
+ p->mod_type == FW_PORT_MOD_TYPE_LRM)
+ cmd->port = PORT_FIBRE;
+ else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
cmd->port = PORT_DA;
else
- cmd->port = PORT_FIBRE;
+ cmd->port = PORT_OTHER;
} else
cmd->port = PORT_OTHER;
@@ -2461,8 +2503,7 @@ static unsigned int qtimer_val(const struct adapter *adap,
}
/**
- * set_rxq_intr_params - set a queue's interrupt holdoff parameters
- * @adap: the adapter
+ * set_rspq_intr_params - set a queue's interrupt holdoff parameters
* @q: the Rx queue
* @us: the hold-off time in us, or 0 to disable timer
* @cnt: the hold-off packet count, or 0 to disable counter
@@ -2470,9 +2511,11 @@ static unsigned int qtimer_val(const struct adapter *adap,
* Sets an Rx queue's interrupt hold-off time and packet count. At least
* one of the two needs to be enabled for the queue to generate interrupts.
*/
-static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
- unsigned int us, unsigned int cnt)
+static int set_rspq_intr_params(struct sge_rspq *q,
+ unsigned int us, unsigned int cnt)
{
+ struct adapter *adap = q->adap;
+
if ((us | cnt) == 0)
cnt = 1;
@@ -2499,24 +2542,34 @@ static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
return 0;
}
-static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+/**
+ * set_rx_intr_params - set a net device's RX interrupt holdoff parameters
+ * @dev: the network device
+ * @us: the hold-off time in us, or 0 to disable timer
+ * @cnt: the hold-off packet count, or 0 to disable counter
+ *
+ * Set the RX interrupt hold-off parameters for a network device.
+ */
+static int set_rx_intr_params(struct net_device *dev,
+ unsigned int us, unsigned int cnt)
{
- const struct port_info *pi = netdev_priv(dev);
+ int i, err;
+ struct port_info *pi = netdev_priv(dev);
struct adapter *adap = pi->adapter;
- struct sge_rspq *q;
- int i;
- int r = 0;
-
- for (i = pi->first_qset; i < pi->first_qset + pi->nqsets; i++) {
- q = &adap->sge.ethrxq[i].rspq;
- r = set_rxq_intr_params(adap, q, c->rx_coalesce_usecs,
- c->rx_max_coalesced_frames);
- if (r) {
- dev_err(&dev->dev, "failed to set coalesce %d\n", r);
- break;
- }
+ struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+ for (i = 0; i < pi->nqsets; i++, q++) {
+ err = set_rspq_intr_params(&q->rspq, us, cnt);
+ if (err)
+ return err;
}
- return r;
+ return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+ return set_rx_intr_params(dev, c->rx_coalesce_usecs,
+ c->rx_max_coalesced_frames);
}
static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
@@ -2732,7 +2785,7 @@ static u32 get_rss_table_size(struct net_device *dev)
return pi->rss_size;
}
-static int get_rss_table(struct net_device *dev, u32 *p)
+static int get_rss_table(struct net_device *dev, u32 *p, u8 *key)
{
const struct port_info *pi = netdev_priv(dev);
unsigned int n = pi->rss_size;
@@ -2742,7 +2795,7 @@ static int get_rss_table(struct net_device *dev, u32 *p)
return 0;
}
-static int set_rss_table(struct net_device *dev, const u32 *p)
+static int set_rss_table(struct net_device *dev, const u32 *p, const u8 *key)
{
unsigned int i;
struct port_info *pi = netdev_priv(dev);
@@ -2844,8 +2897,8 @@ static const struct ethtool_ops cxgb_ethtool_ops = {
.set_wol = set_wol,
.get_rxnfc = get_rxnfc,
.get_rxfh_indir_size = get_rss_table_size,
- .get_rxfh_indir = get_rss_table,
- .set_rxfh_indir = set_rss_table,
+ .get_rxfh = get_rss_table,
+ .set_rxfh = set_rss_table,
.flash_device = set_flash,
};
@@ -3386,6 +3439,77 @@ unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
EXPORT_SYMBOL(cxgb4_best_mtu);
/**
+ * cxgb4_best_aligned_mtu - find best MTU, [hopefully] data size aligned
+ * @mtus: the HW MTU table
+ * @header_size: Header Size
+ * @data_size_max: maximum Data Segment Size
+ * @data_size_align: desired Data Segment Size Alignment (2^N)
+ * @mtu_idxp: HW MTU Table Index return value pointer (possibly NULL)
+ *
+ * Similar to cxgb4_best_mtu() but instead of searching the Hardware
+ * MTU Table based solely on a Maximum MTU parameter, we break that
+ * parameter up into a Header Size and Maximum Data Segment Size, and
+ * provide a desired Data Segment Size Alignment. If we find an MTU in
+ * the Hardware MTU Table which will result in a Data Segment Size with
+ * the requested alignment _and_ that MTU isn't "too far" from the
+ * closest MTU, then we'll return that rather than the closest MTU.
+ */
+unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
+ unsigned short header_size,
+ unsigned short data_size_max,
+ unsigned short data_size_align,
+ unsigned int *mtu_idxp)
+{
+ unsigned short max_mtu = header_size + data_size_max;
+ unsigned short data_size_align_mask = data_size_align - 1;
+ int mtu_idx, aligned_mtu_idx;
+
+ /* Scan the MTU Table till we find an MTU which is larger than our
+ * Maximum MTU or we reach the end of the table. Along the way,
+ * record the last MTU found, if any, which will result in a Data
+ * Segment Length matching the requested alignment.
+ */
+ for (mtu_idx = 0, aligned_mtu_idx = -1; mtu_idx < NMTUS; mtu_idx++) {
+ unsigned short data_size = mtus[mtu_idx] - header_size;
+
+ /* If this MTU minus the Header Size would result in a
+ * Data Segment Size of the desired alignment, remember it.
+ */
+ if ((data_size & data_size_align_mask) == 0)
+ aligned_mtu_idx = mtu_idx;
+
+ /* If we're not at the end of the Hardware MTU Table and the
+ * next element is larger than our Maximum MTU, drop out of
+ * the loop.
+ */
+ if (mtu_idx+1 < NMTUS && mtus[mtu_idx+1] > max_mtu)
+ break;
+ }
+
+ /* If we fell out of the loop because we ran to the end of the table,
+ * then we just have to use the last [largest] entry.
+ */
+ if (mtu_idx == NMTUS)
+ mtu_idx--;
+
+ /* If we found an MTU which resulted in the requested Data Segment
+ * Length alignment and that's "not far" from the largest MTU which is
+ * less than or equal to the maximum MTU, then use that.
+ */
+ if (aligned_mtu_idx >= 0 &&
+ mtu_idx - aligned_mtu_idx <= 1)
+ mtu_idx = aligned_mtu_idx;
+
+ /* If the caller has passed in an MTU Index pointer, pass the
+ * MTU Index back. Return the MTU value.
+ */
+ if (mtu_idxp)
+ *mtu_idxp = mtu_idx;
+ return mtus[mtu_idx];
+}
+EXPORT_SYMBOL(cxgb4_best_aligned_mtu);
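+
+/* Worked example (illustrative): with an MTU table containing 1488 and
+ * 1500, header_size = 40 (20-byte IP + 20-byte TCP), data_size_max = 1460
+ * and data_size_align = 8, the closest entry is 1500, but 1500 - 40 = 1460
+ * is not a multiple of 8. The neighbouring entry 1488 gives
+ * 1488 - 40 = 1448, which is 8-byte aligned and only one table index
+ * away, so the function returns 1488.
+ */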
+
+/**
* cxgb4_port_chan - get the HW channel of a port
* @dev: the net device for the port
*
@@ -3782,7 +3906,9 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.mtus = adap->params.mtus;
if (uld == CXGB4_ULD_RDMA) {
lli.rxq_ids = adap->sge.rdma_rxq;
+ lli.ciq_ids = adap->sge.rdma_ciq;
lli.nrxq = adap->sge.rdmaqs;
+ lli.nciq = adap->sge.rdmaciqs;
} else if (uld == CXGB4_ULD_ISCSI) {
lli.rxq_ids = adap->sge.ofld_rxq;
lli.nrxq = adap->sge.ofldqsets;
@@ -4061,7 +4187,7 @@ static int update_root_dev_clip(struct net_device *dev)
/* Parse all bond and vlan devices layered on top of the physical dev */
for (i = 0; i < VLAN_N_VID; i++) {
- root_dev = __vlan_find_dev_deep(dev, htons(ETH_P_8021Q), i);
+ root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
if (!root_dev)
continue;
@@ -5528,13 +5654,41 @@ static int adap_init0(struct adapter *adap)
#undef FW_PARAM_PFVF
#undef FW_PARAM_DEV
- /*
- * These are finalized by FW initialization, load their values now.
+ /* The MTU/MSS Table is initialized by now, so load its values. If
+ * we're initializing the adapter, then we'll make any modifications
+ * we want to the MTU/MSS Table and also initialize the congestion
+ * parameters.
*/
t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
- t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
- adap->params.b_wnd);
+ if (state != DEV_STATE_INIT) {
+ int i;
+
+ /* The default MTU Table contains values 1492 and 1500.
+ * However, for TCP, it's better to have two values which are
+ * a multiple of 8 +/- 4 bytes apart near this popular MTU.
+ * This allows us to have a TCP Data Payload which is a
+ * multiple of 8 regardless of what combination of TCP Options
+ * are in use (always a multiple of 4 bytes) which is
+ * important for performance reasons. For instance, if no
+ * options are in use, then we have a 20-byte IP header and a
+ * 20-byte TCP header. In this case, a 1500-byte MTU would
+ * result in a TCP Data Payload of 1500 - 40 == 1460 bytes
+ * which is not a multiple of 8. So using an MTU of 1488 in
+ * this case results in a TCP Data Payload of 1448 bytes which
+ * is a multiple of 8. On the other hand, if 12-byte TCP Time
+ * Stamps have been negotiated, then an MTU of 1500 bytes
+ * results in a TCP Data Payload of 1448 bytes which, as
+ * above, is a multiple of 8 bytes ...
+ */
+ for (i = 0; i < NMTUS; i++)
+ if (adap->params.mtus[i] == 1492) {
+ adap->params.mtus[i] = 1488;
+ break;
+ }
+ t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+ adap->params.b_wnd);
+ }
t4_init_tp_params(adap);
adap->flags |= FW_OK;
return 0;
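The arithmetic in the comment above is easy to spot-check (illustrative numbers): 1488 - 20 (IP) - 20 (TCP) = 1448 = 181 * 8, whereas 1500 - 40 = 1460 is not 8-aligned; with 12 bytes of TCP timestamps, 1500 - 52 = 1448 is 8-aligned again, so keeping both 1488 and 1500 in the table covers the common option combinations. As compile-time checks, a sketch:

	_Static_assert((1488 - 40) % 8 == 0, "1488 -> 8-aligned payload");
	_Static_assert((1500 - 40) % 8 != 0, "plain 1500 is not");
	_Static_assert((1500 - 40 - 12) % 8 == 0, "1500 + timestamps is");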
@@ -5669,12 +5823,12 @@ static inline bool is_x_10g_port(const struct link_config *lc)
(lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
-static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
+static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
+ unsigned int us, unsigned int cnt,
unsigned int size, unsigned int iqe_size)
{
- q->intr_params = QINTR_TIMER_IDX(timer_idx) |
- (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
- q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
+ q->adap = adap;
+ set_rspq_intr_params(q, us, cnt);
q->iqe_len = iqe_size;
q->size = size;
}
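Note the semantic shift: callers of init_rspq() now pass a holdoff time in microseconds and a packet-count threshold instead of raw hardware indices; set_rspq_intr_params() (added elsewhere in this series) maps them onto the nearest supported timer/counter index. A hedged reading of the updated call sites below:

	/* Ethernet rx queues: interrupt after roughly 5us of holdoff or
	 * once 10 packets have accumulated, as mapped by the hardware.
	 */
	init_rspq(adap, &r->rspq, 5 /* us */, 10 /* cnt */, 1024, 64);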
@@ -5688,6 +5842,7 @@ static void cfg_queues(struct adapter *adap)
{
struct sge *s = &adap->sge;
int i, q10g = 0, n10g = 0, qidx = 0;
+ int ciq_size;
for_each_port(adap, i)
n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
@@ -5726,12 +5881,13 @@ static void cfg_queues(struct adapter *adap)
s->ofldqsets = adap->params.nports;
/* For RDMA one Rx queue per channel suffices */
s->rdmaqs = adap->params.nports;
+ s->rdmaciqs = adap->params.nports;
}
for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
struct sge_eth_rxq *r = &s->ethrxq[i];
- init_rspq(&r->rspq, 0, 0, 1024, 64);
+ init_rspq(adap, &r->rspq, 5, 10, 1024, 64);
r->fl.size = 72;
}
@@ -5747,7 +5903,7 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
struct sge_ofld_rxq *r = &s->ofldrxq[i];
- init_rspq(&r->rspq, 0, 0, 1024, 64);
+ init_rspq(adap, &r->rspq, 5, 1, 1024, 64);
r->rspq.uld = CXGB4_ULD_ISCSI;
r->fl.size = 72;
}
@@ -5755,13 +5911,26 @@ static void cfg_queues(struct adapter *adap)
for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
struct sge_ofld_rxq *r = &s->rdmarxq[i];
- init_rspq(&r->rspq, 0, 0, 511, 64);
+ init_rspq(adap, &r->rspq, 5, 1, 511, 64);
r->rspq.uld = CXGB4_ULD_RDMA;
r->fl.size = 72;
}
- init_rspq(&s->fw_evtq, 6, 0, 512, 64);
- init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
+ ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
+ if (ciq_size > SGE_MAX_IQ_SIZE) {
+ CH_WARN(adap, "CIQ size too small for available IQs\n");
+ ciq_size = SGE_MAX_IQ_SIZE;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(s->rdmaciq); i++) {
+ struct sge_ofld_rxq *r = &s->rdmaciq[i];
+
+ init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
+ r->rspq.uld = CXGB4_ULD_RDMA;
+ }
+
+ init_rspq(adap, &s->fw_evtq, 0, 1, 1024, 64);
+ init_rspq(adap, &s->intrq, 0, 1, 2 * MAX_INGQ, 64);
}
/*
@@ -5808,9 +5977,9 @@ static int enable_msix(struct adapter *adap)
want = s->max_ethqsets + EXTRA_VECS;
if (is_offload(adap)) {
- want += s->rdmaqs + s->ofldqsets;
+ want += s->rdmaqs + s->rdmaciqs + s->ofldqsets;
/* need nchan for each possible ULD */
- ofld_need = 2 * nchan;
+ ofld_need = 3 * nchan;
}
need = adap->params.nports + EXTRA_VECS + ofld_need;
@@ -6076,7 +6245,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->netdev_ops = &cxgb4_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+ netdev->ethtool_ops = &cxgb_ethtool_ops;
}
pci_set_drvdata(pdev, adapter);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index e274a04..55e9daf 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -232,8 +232,10 @@ struct cxgb4_lld_info {
const struct cxgb4_virt_res *vr; /* assorted HW resources */
const unsigned short *mtus; /* MTU table */
const unsigned short *rxq_ids; /* the ULD's Rx queue ids */
+ const unsigned short *ciq_ids; /* the ULD's concentrator IQ ids */
unsigned short nrxq; /* # of Rx queues */
unsigned short ntxq; /* # of Tx queues */
+ unsigned short nciq; /* # of concentrator IQs */
unsigned char nchan:4; /* # of channels */
unsigned char nports:4; /* # of ports */
unsigned char wr_cred; /* WR 16-byte credits */
@@ -274,6 +276,11 @@ unsigned int cxgb4_port_viid(const struct net_device *dev);
unsigned int cxgb4_port_idx(const struct net_device *dev);
unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx);
+unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus,
+ unsigned short header_size,
+ unsigned short data_size_max,
+ unsigned short data_size_align,
+ unsigned int *mtu_idxp);
void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
struct tp_tcp_stats *v6);
void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index e249528..dd4355d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -1697,7 +1697,8 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
return handle_trace_pkt(q->adap, si);
pkt = (const struct cpl_rx_pkt *)rsp;
- csum_ok = pkt->csum_calc && !pkt->err_vec;
+ csum_ok = pkt->csum_calc && !pkt->err_vec &&
+ (q->netdev->features & NETIF_F_RXCSUM);
if ((pkt->l2info & htonl(RXF_TCP)) &&
(q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
do_gro(rxq, si, pkt);
@@ -1720,8 +1721,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
rxq->stats.pkts++;
- if (csum_ok && (q->netdev->features & NETIF_F_RXCSUM) &&
- (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+ if (csum_ok && (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
if (!pkt->ip_frag) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
rxq->stats.rx_cso++;
@@ -2215,7 +2215,6 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
iq->cntxt_id = ntohs(c.iqid);
iq->abs_id = ntohs(c.physiqid);
iq->size--; /* subtract status entry */
- iq->adap = adap;
iq->netdev = dev;
iq->handler = hnd;
@@ -2515,6 +2514,10 @@ void t4_free_sge_resources(struct adapter *adap)
if (oq->rspq.desc)
free_rspq_fl(adap, &oq->rspq, &oq->fl);
}
+ for (i = 0, oq = adap->sge.rdmaciq; i < adap->sge.rdmaciqs; i++, oq++) {
+ if (oq->rspq.desc)
+ free_rspq_fl(adap, &oq->rspq, &oq->fl);
+ }
/* clean up offload Tx queues */
for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
index 1d1623b..71b799b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
@@ -68,6 +68,7 @@ enum {
SGE_MAX_WR_LEN = 512, /* max WR size in bytes */
SGE_NTIMERS = 6, /* # of interrupt holdoff timer values */
SGE_NCOUNTERS = 4, /* # of interrupt packet counter values */
+ SGE_MAX_IQ_SIZE = 65520,
SGE_TIMER_RSTRT_CNTR = 6, /* restart RX packet threshold counter */
SGE_TIMER_UPD_CIDX = 7, /* update cidx only */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
index f2738c7..973eb11 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
@@ -227,6 +227,7 @@ struct cpl_pass_open_req {
#define DELACK(x) ((x) << 5)
#define ULP_MODE(x) ((x) << 8)
#define RCV_BUFSIZ(x) ((x) << 12)
+#define RCV_BUFSIZ_MASK 0x3FFU
#define DSCP(x) ((x) << 22)
#define SMAC_SEL(x) ((u64)(x) << 28)
#define L2T_IDX(x) ((u64)(x) << 36)
@@ -278,6 +279,15 @@ struct cpl_pass_accept_rpl {
__be64 opt0;
};
+struct cpl_t5_pass_accept_rpl {
+ WR_HDR;
+ union opcode_tid ot;
+ __be32 opt2;
+ __be64 opt0;
+ __be32 iss;
+ __be32 rsvd;
+};
+
struct cpl_act_open_req {
WR_HDR;
union opcode_tid ot;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 5285928..ff1cdd1 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2664,7 +2664,7 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev,
netdev->priv_flags |= IFF_UNICAST_FLT;
netdev->netdev_ops = &cxgb4vf_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &cxgb4vf_ethtool_ops);
+ netdev->ethtool_ops = &cxgb4vf_ethtool_ops;
/*
* Initialize the hardware/software state for the port.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 9d88c1d..bdfa80c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1510,7 +1510,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
{
struct sk_buff *skb;
const struct cpl_rx_pkt *pkt = (void *)rsp;
- bool csum_ok = pkt->csum_calc && !pkt->err_vec;
+ bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
+ (rspq->netdev->features & NETIF_F_RXCSUM);
struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
/*
@@ -1538,8 +1539,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
skb_record_rx_queue(skb, rspq->idx);
rxq->stats.pkts++;
- if (csum_ok && (rspq->netdev->features & NETIF_F_RXCSUM) &&
- !pkt->err_vec && (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
+ if (csum_ok && !pkt->err_vec &&
+ (be32_to_cpu(pkt->l2info) & (RXF_UDP|RXF_TCP))) {
if (!pkt->ip_frag)
skb->ip_summed = CHECKSUM_UNNECESSARY;
else {
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index e35c8e0..14f465f 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -43,6 +43,8 @@
#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX)
#define ENIC_INTR_MAX (ENIC_CQ_MAX + 2)
+#define ENIC_AIC_LARGE_PKT_DIFF 3
+
struct enic_msix_entry {
int requested;
char devname[IFNAMSIZ];
@@ -50,6 +52,33 @@ struct enic_msix_entry {
void *devid;
};
+/* Store only the lower range. Higher range is given by fw. */
+struct enic_intr_mod_range {
+ u32 small_pkt_range_start;
+ u32 large_pkt_range_start;
+};
+
+struct enic_intr_mod_table {
+ u32 rx_rate;
+ u32 range_percent;
+};
+
+#define ENIC_MAX_LINK_SPEEDS 3
+#define ENIC_LINK_SPEED_10G 10000
+#define ENIC_LINK_SPEED_4G 4000
+#define ENIC_LINK_40G_INDEX 2
+#define ENIC_LINK_10G_INDEX 1
+#define ENIC_LINK_4G_INDEX 0
+#define ENIC_RX_COALESCE_RANGE_END 125
+#define ENIC_AIC_TS_BREAK 100
+
+struct enic_rx_coal {
+ u32 small_pkt_range_start;
+ u32 large_pkt_range_start;
+ u32 range_end;
+ u32 use_adaptive_rx_coalesce;
+};
+
/* priv_flags */
#define ENIC_SRIOV_ENABLED (1 << 0)
@@ -85,13 +114,12 @@ struct enic {
u32 msg_enable;
spinlock_t devcmd_lock;
u8 mac_addr[ETH_ALEN];
- u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
- u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
unsigned int flags;
unsigned int priv_flags;
unsigned int mc_count;
unsigned int uc_count;
u32 port_mtu;
+ struct enic_rx_coal rx_coalesce_setting;
u32 rx_coalesce_usecs;
u32 tx_coalesce_usecs;
#ifdef CONFIG_PCI_IOV
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.c b/drivers/net/ethernet/cisco/enic/enic_dev.c
index 4b6e569..3e27df5 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.c
@@ -88,7 +88,7 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
return err;
}
-int enic_dev_add_addr(struct enic *enic, u8 *addr)
+int enic_dev_add_addr(struct enic *enic, const u8 *addr)
{
int err;
@@ -99,7 +99,7 @@ int enic_dev_add_addr(struct enic *enic, u8 *addr)
return err;
}
-int enic_dev_del_addr(struct enic *enic, u8 *addr)
+int enic_dev_del_addr(struct enic *enic, const u8 *addr)
{
int err;
diff --git a/drivers/net/ethernet/cisco/enic/enic_dev.h b/drivers/net/ethernet/cisco/enic/enic_dev.h
index 129b14a..36ea1ab 100644
--- a/drivers/net/ethernet/cisco/enic/enic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/enic_dev.h
@@ -45,8 +45,8 @@ int enic_dev_add_station_addr(struct enic *enic);
int enic_dev_del_station_addr(struct enic *enic);
int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
int broadcast, int promisc, int allmulti);
-int enic_dev_add_addr(struct enic *enic, u8 *addr);
-int enic_dev_del_addr(struct enic *enic, u8 *addr);
+int enic_dev_add_addr(struct enic *enic, const u8 *addr);
+int enic_dev_del_addr(struct enic *enic, const u8 *addr);
int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid);
int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid);
int enic_dev_notify_unset(struct enic *enic);
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
index 47e3562..2e50b54 100644
--- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c
+++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c
@@ -79,6 +79,17 @@ static const struct enic_stat enic_rx_stats[] = {
static const unsigned int enic_n_tx_stats = ARRAY_SIZE(enic_tx_stats);
static const unsigned int enic_n_rx_stats = ARRAY_SIZE(enic_rx_stats);
+void enic_intr_coal_set_rx(struct enic *enic, u32 timer)
+{
+ int i;
+ int intr;
+
+ for (i = 0; i < enic->rq_count; i++) {
+ intr = enic_msix_rq_intr(enic, i);
+ vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+ }
+}
+
static int enic_get_settings(struct net_device *netdev,
struct ethtool_cmd *ecmd)
{
@@ -93,8 +104,8 @@ static int enic_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, vnic_dev_port_speed(enic->vdev));
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_DISABLE;
@@ -178,9 +189,14 @@ static int enic_get_coalesce(struct net_device *netdev,
struct ethtool_coalesce *ecmd)
{
struct enic *enic = netdev_priv(netdev);
+ struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
+ if (rxcoal->use_adaptive_rx_coalesce)
+ ecmd->use_adaptive_rx_coalesce = 1;
+ ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
+ ecmd->rx_coalesce_usecs_high = rxcoal->range_end;
return 0;
}
@@ -191,17 +207,31 @@ static int enic_set_coalesce(struct net_device *netdev,
struct enic *enic = netdev_priv(netdev);
u32 tx_coalesce_usecs;
u32 rx_coalesce_usecs;
+ u32 rx_coalesce_usecs_low;
+ u32 rx_coalesce_usecs_high;
+ u32 coalesce_usecs_max;
unsigned int i, intr;
+ struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;
+ coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
+ coalesce_usecs_max);
rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
- vnic_dev_get_intr_coal_timer_max(enic->vdev));
+ coalesce_usecs_max);
+
+ rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
+ coalesce_usecs_max);
+ rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
+ coalesce_usecs_max);
switch (vnic_dev_get_intr_mode(enic->vdev)) {
case VNIC_DEV_INTR_MODE_INTX:
if (tx_coalesce_usecs != rx_coalesce_usecs)
return -EINVAL;
+ if (ecmd->use_adaptive_rx_coalesce ||
+ ecmd->rx_coalesce_usecs_low ||
+ ecmd->rx_coalesce_usecs_high)
+ return -EOPNOTSUPP;
intr = enic_legacy_io_intr();
vnic_intr_coalescing_timer_set(&enic->intr[intr],
@@ -210,6 +240,10 @@ static int enic_set_coalesce(struct net_device *netdev,
case VNIC_DEV_INTR_MODE_MSI:
if (tx_coalesce_usecs != rx_coalesce_usecs)
return -EINVAL;
+ if (ecmd->use_adaptive_rx_coalesce ||
+ ecmd->rx_coalesce_usecs_low ||
+ ecmd->rx_coalesce_usecs_high)
+ return -EOPNOTSUPP;
vnic_intr_coalescing_timer_set(&enic->intr[0],
tx_coalesce_usecs);
@@ -221,12 +255,27 @@ static int enic_set_coalesce(struct net_device *netdev,
tx_coalesce_usecs);
}
- for (i = 0; i < enic->rq_count; i++) {
- intr = enic_msix_rq_intr(enic, i);
- vnic_intr_coalescing_timer_set(&enic->intr[intr],
- rx_coalesce_usecs);
+ if (rxcoal->use_adaptive_rx_coalesce) {
+ if (!ecmd->use_adaptive_rx_coalesce) {
+ rxcoal->use_adaptive_rx_coalesce = 0;
+ enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
+ }
+ } else {
+ if (ecmd->use_adaptive_rx_coalesce)
+ rxcoal->use_adaptive_rx_coalesce = 1;
+ else
+ enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
}
+ if (ecmd->rx_coalesce_usecs_high) {
+ if (rx_coalesce_usecs_high <
+ (rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+ return -EINVAL;
+ rxcoal->range_end = rx_coalesce_usecs_high;
+ rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
+ rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
+ ENIC_AIC_LARGE_PKT_DIFF;
+ }
break;
default:
break;
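With these checks in place, adaptive rx coalescing can be driven from userspace (e.g. via ethtool -C). A hypothetical request as the driver would see it, with illustrative values:

	struct ethtool_coalesce ec = {
		.use_adaptive_rx_coalesce = 1,
		.rx_coalesce_usecs_low = 15,	/* small-packet range start */
		.rx_coalesce_usecs_high = 80,	/* range end; must be at least
						 * low + ENIC_AIC_LARGE_PKT_DIFF
						 */
	};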
@@ -253,5 +302,5 @@ static const struct ethtool_ops enic_ethtool_ops = {
void enic_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &enic_ethtool_ops);
+ netdev->ethtool_ops = &enic_ethtool_ops;
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 2945718..f32f828 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -38,6 +38,7 @@
#include <linux/rtnetlink.h>
#include <linux/prefetch.h>
#include <net/ip6_checksum.h>
+#include <linux/ktime.h>
#include "cq_enet_desc.h"
#include "vnic_dev.h"
@@ -72,6 +73,35 @@ MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, enic_id_table);
+#define ENIC_LARGE_PKT_THRESHOLD 1000
+#define ENIC_MAX_COALESCE_TIMERS 10
+/* Interrupt moderation table, which will be used to decide the
+ * coalescing timer values
+ * {rx_rate in Mbps, mapping percentage of the range}
+ */
+struct enic_intr_mod_table mod_table[ENIC_MAX_COALESCE_TIMERS + 1] = {
+ {4000, 0},
+ {4400, 10},
+ {5060, 20},
+ {5230, 30},
+ {5540, 40},
+ {5820, 50},
+ {6120, 60},
+ {6435, 70},
+ {6745, 80},
+ {7000, 90},
+ {0xFFFFFFFF, 100}
+};
+
+/* This table helps the driver pick the range for the rx coalescing
+ * timer appropriate to the link speed.
+ */
+struct enic_intr_mod_range mod_range[ENIC_MAX_LINK_SPEEDS] = {
+ {0, 0}, /* 0 - 4 Gbps */
+ {0, 3}, /* 4 - 10 Gbps */
+ {3, 6}, /* 10 - 40 Gbps */
+};
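A worked example of how the two tables combine (illustrative numbers): on a 10 Gbps link the driver uses mod_range[ENIC_LINK_10G_INDEX] = {0, 3} with range_end = 125us. At a measured rate of about 5000 Mbps, the first mod_table row with rx_rate above it is {5060, 20}, so for small-packet traffic:

	u32 range_start = 0, range_end = 125;	/* 10G, small packets	*/
	u32 percent = 20;			/* mod_table row {5060, 20} */
	u32 timer = range_start +
		    (range_end - range_start) * percent / 100;	/* 25us */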
+
int enic_is_dynamic(struct enic *enic)
{
return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_DYN;
@@ -586,8 +616,71 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
return net_stats;
}
+static int enic_mc_sync(struct net_device *netdev, const u8 *mc_addr)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ if (enic->mc_count == ENIC_MULTICAST_PERFECT_FILTERS) {
+ unsigned int mc_count = netdev_mc_count(netdev);
+
+ netdev_warn(netdev, "Registering only %d out of %d multicast addresses\n",
+ ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
+
+ return -ENOSPC;
+ }
+
+ enic_dev_add_addr(enic, mc_addr);
+ enic->mc_count++;
+
+ return 0;
+}
+
+static int enic_mc_unsync(struct net_device *netdev, const u8 *mc_addr)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ enic_dev_del_addr(enic, mc_addr);
+ enic->mc_count--;
+
+ return 0;
+}
+
+static int enic_uc_sync(struct net_device *netdev, const u8 *uc_addr)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ if (enic->uc_count == ENIC_UNICAST_PERFECT_FILTERS) {
+ unsigned int uc_count = netdev_uc_count(netdev);
+
+ netdev_warn(netdev, "Registering only %d out of %d unicast addresses\n",
+ ENIC_UNICAST_PERFECT_FILTERS, uc_count);
+
+ return -ENOSPC;
+ }
+
+ enic_dev_add_addr(enic, uc_addr);
+ enic->uc_count++;
+
+ return 0;
+}
+
+static int enic_uc_unsync(struct net_device *netdev, const u8 *uc_addr)
+{
+ struct enic *enic = netdev_priv(netdev);
+
+ enic_dev_del_addr(enic, uc_addr);
+ enic->uc_count--;
+
+ return 0;
+}
+
void enic_reset_addr_lists(struct enic *enic)
{
+ struct net_device *netdev = enic->netdev;
+
+ __dev_uc_unsync(netdev, NULL);
+ __dev_mc_unsync(netdev, NULL);
+
enic->mc_count = 0;
enic->uc_count = 0;
enic->flags = 0;
@@ -654,112 +747,6 @@ static int enic_set_mac_address(struct net_device *netdev, void *p)
return enic_dev_add_station_addr(enic);
}
-static void enic_update_multicast_addr_list(struct enic *enic)
-{
- struct net_device *netdev = enic->netdev;
- struct netdev_hw_addr *ha;
- unsigned int mc_count = netdev_mc_count(netdev);
- u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
- unsigned int i, j;
-
- if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
- netdev_warn(netdev, "Registering only %d out of %d "
- "multicast addresses\n",
- ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
- mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
- }
-
- /* Is there an easier way? Trying to minimize to
- * calls to add/del multicast addrs. We keep the
- * addrs from the last call in enic->mc_addr and
- * look for changes to add/del.
- */
-
- i = 0;
- netdev_for_each_mc_addr(ha, netdev) {
- if (i == mc_count)
- break;
- memcpy(mc_addr[i++], ha->addr, ETH_ALEN);
- }
-
- for (i = 0; i < enic->mc_count; i++) {
- for (j = 0; j < mc_count; j++)
- if (ether_addr_equal(enic->mc_addr[i], mc_addr[j]))
- break;
- if (j == mc_count)
- enic_dev_del_addr(enic, enic->mc_addr[i]);
- }
-
- for (i = 0; i < mc_count; i++) {
- for (j = 0; j < enic->mc_count; j++)
- if (ether_addr_equal(mc_addr[i], enic->mc_addr[j]))
- break;
- if (j == enic->mc_count)
- enic_dev_add_addr(enic, mc_addr[i]);
- }
-
- /* Save the list to compare against next time
- */
-
- for (i = 0; i < mc_count; i++)
- memcpy(enic->mc_addr[i], mc_addr[i], ETH_ALEN);
-
- enic->mc_count = mc_count;
-}
-
-static void enic_update_unicast_addr_list(struct enic *enic)
-{
- struct net_device *netdev = enic->netdev;
- struct netdev_hw_addr *ha;
- unsigned int uc_count = netdev_uc_count(netdev);
- u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
- unsigned int i, j;
-
- if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
- netdev_warn(netdev, "Registering only %d out of %d "
- "unicast addresses\n",
- ENIC_UNICAST_PERFECT_FILTERS, uc_count);
- uc_count = ENIC_UNICAST_PERFECT_FILTERS;
- }
-
- /* Is there an easier way? Trying to minimize to
- * calls to add/del unicast addrs. We keep the
- * addrs from the last call in enic->uc_addr and
- * look for changes to add/del.
- */
-
- i = 0;
- netdev_for_each_uc_addr(ha, netdev) {
- if (i == uc_count)
- break;
- memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
- }
-
- for (i = 0; i < enic->uc_count; i++) {
- for (j = 0; j < uc_count; j++)
- if (ether_addr_equal(enic->uc_addr[i], uc_addr[j]))
- break;
- if (j == uc_count)
- enic_dev_del_addr(enic, enic->uc_addr[i]);
- }
-
- for (i = 0; i < uc_count; i++) {
- for (j = 0; j < enic->uc_count; j++)
- if (ether_addr_equal(uc_addr[i], enic->uc_addr[j]))
- break;
- if (j == enic->uc_count)
- enic_dev_add_addr(enic, uc_addr[i]);
- }
-
- /* Save the list to compare against next time
- */
-
- for (i = 0; i < uc_count; i++)
- memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);
-
- enic->uc_count = uc_count;
-}
-
/* netif_tx_lock held, BHs disabled */
static void enic_set_rx_mode(struct net_device *netdev)
{
@@ -782,9 +769,9 @@ static void enic_set_rx_mode(struct net_device *netdev)
}
if (!promisc) {
- enic_update_unicast_addr_list(enic);
+ __dev_uc_sync(netdev, enic_uc_sync, enic_uc_unsync);
if (!allmulti)
- enic_update_multicast_addr_list(enic);
+ __dev_mc_sync(netdev, enic_mc_sync, enic_mc_unsync);
}
}
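The hand-rolled snapshot-and-diff of address lists above is replaced by the core __dev_uc_sync()/__dev_mc_sync() helpers, which call the driver's sync callback once per newly added address and unsync once per removed address while the core tracks list state. A minimal sketch of the contract; the driver name and foo_hw_{add,del}_filter are hypothetical stand-ins:

	static int foo_uc_sync(struct net_device *nd, const u8 *addr)
	{
		return foo_hw_add_filter(netdev_priv(nd), addr);
	}

	static int foo_uc_unsync(struct net_device *nd, const u8 *addr)
	{
		return foo_hw_del_filter(netdev_priv(nd), addr);
	}

	static void foo_set_rx_mode(struct net_device *nd)
	{
		__dev_uc_sync(nd, foo_uc_sync, foo_uc_unsync);
	}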
@@ -979,6 +966,15 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
return 0;
}
+static void enic_intr_update_pkt_size(struct vnic_rx_bytes_counter *pkt_size,
+ u32 pkt_len)
+{
+ if (ENIC_LARGE_PKT_THRESHOLD <= pkt_len)
+ pkt_size->large_pkt_bytes_cnt += pkt_len;
+ else
+ pkt_size->small_pkt_bytes_cnt += pkt_len;
+}
+
static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct cq_desc *cq_desc, struct vnic_rq_buf *buf,
int skipped, void *opaque)
@@ -986,6 +982,7 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
struct enic *enic = vnic_dev_priv(rq->vdev);
struct net_device *netdev = enic->netdev;
struct sk_buff *skb;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
u8 type, color, eop, sop, ingress_port, vlan_stripped;
u8 fcoe, fcoe_sof, fcoe_fc_crc_ok, fcoe_enc_error, fcoe_eof;
@@ -1056,6 +1053,9 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
napi_gro_receive(&enic->napi[q_number], skb);
else
netif_receive_skb(skb);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_intr_update_pkt_size(&cq->pkt_size_counter,
+ bytes_written);
} else {
/* Buffer overflow
@@ -1134,6 +1134,64 @@ static int enic_poll(struct napi_struct *napi, int budget)
return rq_work_done;
}
+static void enic_set_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ unsigned int intr = enic_msix_rq_intr(enic, rq->index);
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ u32 timer = cq->tobe_rx_coal_timeval;
+
+ if (cq->tobe_rx_coal_timeval != cq->cur_rx_coal_timeval) {
+ vnic_intr_coalescing_timer_set(&enic->intr[intr], timer);
+ cq->cur_rx_coal_timeval = cq->tobe_rx_coal_timeval;
+ }
+}
+
+static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
+{
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+ struct vnic_cq *cq = &enic->cq[enic_cq_rq(enic, rq->index)];
+ struct vnic_rx_bytes_counter *pkt_size_counter = &cq->pkt_size_counter;
+ int index;
+ u32 timer;
+ u32 range_start;
+ u32 traffic;
+ u64 delta;
+ ktime_t now = ktime_get();
+
+ delta = ktime_us_delta(now, cq->prev_ts);
+ if (delta < ENIC_AIC_TS_BREAK)
+ return;
+ cq->prev_ts = now;
+
+ traffic = pkt_size_counter->large_pkt_bytes_cnt +
+ pkt_size_counter->small_pkt_bytes_cnt;
+ /* The table takes Mbps
+ * traffic *= 8 => bits
+ * traffic *= (10^6 / delta) => bps
+ * traffic /= 10^6 => Mbps
+ *
+ * Combining, traffic *= (8 / delta)
+ */
+
+ traffic <<= 3;
+ traffic = delta > UINT_MAX ? 0 : traffic / (u32)delta;
+
+ for (index = 0; index < ENIC_MAX_COALESCE_TIMERS; index++)
+ if (traffic < mod_table[index].rx_rate)
+ break;
+ range_start = (pkt_size_counter->small_pkt_bytes_cnt >
+ pkt_size_counter->large_pkt_bytes_cnt << 1) ?
+ rx_coal->small_pkt_range_start :
+ rx_coal->large_pkt_range_start;
+ timer = range_start + ((rx_coal->range_end - range_start) *
+ mod_table[index].range_percent / 100);
+ /* Damping */
+ cq->tobe_rx_coal_timeval = (timer + cq->tobe_rx_coal_timeval) >> 1;
+
+ pkt_size_counter->large_pkt_bytes_cnt = 0;
+ pkt_size_counter->small_pkt_bytes_cnt = 0;
+}
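The unit bookkeeping in the comment checks out with concrete (illustrative) numbers, and the ">> 1" damping simply moves the timer halfway toward each new target:

	/* 250,000 bytes observed over a 2,000us window */
	u64 bits = 250000ULL * 8;	/* 2,000,000 bits		*/
	u32 mbps = bits / 2000;		/* 1000 bits/us == 1000 Mbps	*/
	u32 next = (20 + 40) >> 1;	/* 40us timer, 20us target: 30us */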
+
static int enic_poll_msix(struct napi_struct *napi, int budget)
{
struct net_device *netdev = napi->dev;
@@ -1171,6 +1229,13 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
if (err)
work_done = work_to_do;
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ /* Call the function which refreshes
+ * the intr coalescing timer value based on
+ * the traffic. This is supported only in
+ * the case of MSI-X mode
+ */
+ enic_calc_int_moderation(enic, &enic->rq[rq]);
if (work_done < work_to_do) {
@@ -1179,6 +1244,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
*/
napi_complete(napi);
+ if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+ enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
}
@@ -1314,6 +1381,42 @@ static void enic_synchronize_irqs(struct enic *enic)
}
}
+static void enic_set_rx_coal_setting(struct enic *enic)
+{
+ unsigned int speed;
+ int index = -1;
+ struct enic_rx_coal *rx_coal = &enic->rx_coalesce_setting;
+
+ /* If intr mode is not MSIX, do not do adaptive coalescing */
+ if (VNIC_DEV_INTR_MODE_MSIX != vnic_dev_get_intr_mode(enic->vdev)) {
+ netdev_info(enic->netdev, "INTR mode is not MSI-X, not initializing adaptive coalescing");
+ return;
+ }
+
+ /* 1. Read the link speed from fw
+ * 2. Pick the default range for the speed
+ * 3. Update it in enic->rx_coalesce_setting
+ */
+ speed = vnic_dev_port_speed(enic->vdev);
+ if (ENIC_LINK_SPEED_10G < speed)
+ index = ENIC_LINK_40G_INDEX;
+ else if (ENIC_LINK_SPEED_4G < speed)
+ index = ENIC_LINK_10G_INDEX;
+ else
+ index = ENIC_LINK_4G_INDEX;
+
+ rx_coal->small_pkt_range_start = mod_range[index].small_pkt_range_start;
+ rx_coal->large_pkt_range_start = mod_range[index].large_pkt_range_start;
+ rx_coal->range_end = ENIC_RX_COALESCE_RANGE_END;
+
+ /* Start with the value provided by UCSM */
+ for (index = 0; index < enic->rq_count; index++)
+ enic->cq[index].cur_rx_coal_timeval =
+ enic->config.intr_timer_usec;
+
+ rx_coal->use_adaptive_rx_coalesce = 1;
+}
+
static int enic_dev_notify_set(struct enic *enic)
{
int err;
@@ -2231,6 +2334,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
enic->notify_timer.function = enic_notify_timer;
enic->notify_timer.data = (unsigned long)enic;
+ enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->change_mtu_work, enic_change_mtu_work);
@@ -2250,6 +2354,9 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
+ /* The rx coalesce timer has already been initialized; this value
+ * is used if adaptive coalescing is turned off.
+ */
enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
diff --git a/drivers/net/ethernet/cisco/enic/vnic_cq.h b/drivers/net/ethernet/cisco/enic/vnic_cq.h
index 579315c..4e6aa65 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_cq.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_cq.h
@@ -50,6 +50,11 @@ struct vnic_cq_ctrl {
u32 pad10;
};
+struct vnic_rx_bytes_counter {
+ unsigned int small_pkt_bytes_cnt;
+ unsigned int large_pkt_bytes_cnt;
+};
+
struct vnic_cq {
unsigned int index;
struct vnic_dev *vdev;
@@ -58,6 +63,10 @@ struct vnic_cq {
unsigned int to_clean;
unsigned int last_color;
unsigned int interrupt_offset;
+ struct vnic_rx_bytes_counter pkt_size_counter;
+ unsigned int cur_rx_coal_timeval;
+ unsigned int tobe_rx_coal_timeval;
+ ktime_t prev_ts;
};
static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 69dd925..e86a45c 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -657,7 +657,7 @@ int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
return err;
}
-int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
@@ -674,7 +674,7 @@ int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr)
return err;
}
-int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr)
+int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr)
{
u64 a0 = 0, a1 = 0;
int wait = 1000;
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.h b/drivers/net/ethernet/cisco/enic/vnic_dev.h
index e670029..1f3b301 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.h
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.h
@@ -95,8 +95,8 @@ int vnic_dev_stats_dump(struct vnic_dev *vdev, struct vnic_stats **stats);
int vnic_dev_hang_notify(struct vnic_dev *vdev);
int vnic_dev_packet_filter(struct vnic_dev *vdev, int directed, int multicast,
int broadcast, int promisc, int allmulti);
-int vnic_dev_add_addr(struct vnic_dev *vdev, u8 *addr);
-int vnic_dev_del_addr(struct vnic_dev *vdev, u8 *addr);
+int vnic_dev_add_addr(struct vnic_dev *vdev, const u8 *addr);
+int vnic_dev_del_addr(struct vnic_dev *vdev, const u8 *addr);
int vnic_dev_get_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_notify_set(struct vnic_dev *vdev, u16 intr);
int vnic_dev_notify_unset(struct vnic_dev *vdev);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 8c4b93b..13723c9 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -109,6 +109,7 @@ typedef struct board_info {
u8 imr_all;
unsigned int flags;
+ unsigned int in_timeout:1;
unsigned int in_suspend:1;
unsigned int wake_supported:1;
@@ -187,13 +188,13 @@ dm9000_reset(board_info_t *db)
* The essential point is that we have to do a double reset, and the
* instruction is to set LBK into MAC internal loopback mode.
*/
- iow(db, DM9000_NCR, 0x03);
+ iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
udelay(100); /* Application note says at least 20 us */
if (ior(db, DM9000_NCR) & 1)
dev_err(db->dev, "dm9000 did not respond to first reset\n");
iow(db, DM9000_NCR, 0);
- iow(db, DM9000_NCR, 0x03);
+ iow(db, DM9000_NCR, NCR_RST | NCR_MAC_LBK);
udelay(100);
if (ior(db, DM9000_NCR) & 1)
dev_err(db->dev, "dm9000 did not respond to second reset\n");
@@ -273,7 +274,7 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
*/
static void dm9000_msleep(board_info_t *db, unsigned int ms)
{
- if (db->in_suspend)
+ if (db->in_suspend || db->in_timeout)
mdelay(ms);
else
msleep(ms);
@@ -334,7 +335,8 @@ dm9000_phy_write(struct net_device *dev,
unsigned long reg_save;
dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
- mutex_lock(&db->addr_lock);
+ if (!db->in_timeout)
+ mutex_lock(&db->addr_lock);
spin_lock_irqsave(&db->lock, flags);
@@ -365,7 +367,8 @@ dm9000_phy_write(struct net_device *dev,
writeb(reg_save, db->io_addr);
spin_unlock_irqrestore(&db->lock, flags);
- mutex_unlock(&db->addr_lock);
+ if (!db->in_timeout)
+ mutex_unlock(&db->addr_lock);
}
/* dm9000_set_io
@@ -882,6 +885,18 @@ dm9000_hash_table(struct net_device *dev)
spin_unlock_irqrestore(&db->lock, flags);
}
+static void
+dm9000_mask_interrupts(board_info_t *db)
+{
+ iow(db, DM9000_IMR, IMR_PAR);
+}
+
+static void
+dm9000_unmask_interrupts(board_info_t *db)
+{
+ iow(db, DM9000_IMR, db->imr_all);
+}
+
/*
* Initialize dm9000 board
*/
@@ -894,6 +909,9 @@ dm9000_init_dm9000(struct net_device *dev)
dm9000_dbg(db, 1, "entering %s\n", __func__);
+ dm9000_reset(db);
+ dm9000_mask_interrupts(db);
+
/* I/O mode */
db->io_mode = ior(db, DM9000_ISR) >> 6; /* ISR bit7:6 keeps I/O mode */
@@ -941,9 +959,6 @@ dm9000_init_dm9000(struct net_device *dev)
db->imr_all = imr;
- /* Enable TX/RX interrupt mask */
- iow(db, DM9000_IMR, imr);
-
/* Init Driver variable */
db->tx_pkt_cnt = 0;
db->queue_pkt_len = 0;
@@ -959,17 +974,19 @@ static void dm9000_timeout(struct net_device *dev)
/* Save previous register address */
spin_lock_irqsave(&db->lock, flags);
+ db->in_timeout = 1;
reg_save = readb(db->io_addr);
netif_stop_queue(dev);
- dm9000_reset(db);
dm9000_init_dm9000(dev);
+ dm9000_unmask_interrupts(db);
/* We can accept TX packets again */
dev->trans_start = jiffies; /* prevent tx timeout */
netif_wake_queue(dev);
/* Restore previous register address */
writeb(reg_save, db->io_addr);
+ db->in_timeout = 0;
spin_unlock_irqrestore(&db->lock, flags);
}
@@ -1093,7 +1110,6 @@ dm9000_rx(struct net_device *dev)
if (rxbyte & DM9000_PKT_ERR) {
dev_warn(db->dev, "status check fail: %d\n", rxbyte);
iow(db, DM9000_RCR, 0x00); /* Stop Device */
- iow(db, DM9000_ISR, IMR_PAR); /* Stop INT request */
return;
}
@@ -1193,9 +1209,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
/* Save previous register address */
reg_save = readb(db->io_addr);
- /* Disable all interrupts */
- iow(db, DM9000_IMR, IMR_PAR);
-
+ dm9000_mask_interrupts(db);
/* Got DM9000 interrupt status */
int_status = ior(db, DM9000_ISR); /* Got ISR */
iow(db, DM9000_ISR, int_status); /* Clear ISR status */
@@ -1218,9 +1232,7 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
}
}
- /* Re-enable interrupt mask */
- iow(db, DM9000_IMR, db->imr_all);
-
+ dm9000_unmask_interrupts(db);
/* Restore previous register address */
writeb(reg_save, db->io_addr);
@@ -1292,6 +1304,9 @@ dm9000_open(struct net_device *dev)
* may work, and tell the user that this is a problem */
if (irqflags == IRQF_TRIGGER_NONE)
+ irqflags = irq_get_trigger_type(dev->irq);
+
+ if (irqflags == IRQF_TRIGGER_NONE)
dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
irqflags |= IRQF_SHARED;
@@ -1301,11 +1316,14 @@ dm9000_open(struct net_device *dev)
mdelay(1); /* delay needs by DM9000B */
/* Initialize DM9000 board */
- dm9000_reset(db);
dm9000_init_dm9000(dev);
if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
return -EAGAIN;
+ /* Now that we have an interrupt handler hooked up, we can unmask
+ * our interrupts
+ */
+ dm9000_unmask_interrupts(db);
/* Init driver variable */
db->dbug_cnt = 0;
@@ -1313,7 +1331,8 @@ dm9000_open(struct net_device *dev)
mii_check_media(&db->mii, netif_msg_link(db), 1);
netif_start_queue(dev);
- dm9000_schedule_poll(db);
+ /* Poll initial link status */
+ schedule_delayed_work(&db->phy_poll, 1);
return 0;
}
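The reordered open path closes a small race: the chip is reset and initialized with interrupts masked, the handler is installed, and only then are interrupts unmasked. In outline (a restatement of the code above, not new logic):

	dm9000_init_dm9000(dev);	/* resets chip, leaves IMR masked */
	if (request_irq(dev->irq, dm9000_interrupt, irqflags,
			dev->name, dev))
		return -EAGAIN;
	dm9000_unmask_interrupts(db);	/* handler exists; safe to unmask */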
@@ -1326,7 +1345,7 @@ dm9000_shutdown(struct net_device *dev)
/* RESET device */
dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
iow(db, DM9000_GPR, 0x01); /* Power-Down PHY */
- iow(db, DM9000_IMR, IMR_PAR); /* Disable all interrupt */
+ dm9000_mask_interrupts(db);
iow(db, DM9000_RCR, 0x00); /* Disable RX */
}
@@ -1547,12 +1566,7 @@ dm9000_probe(struct platform_device *pdev)
db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif
- /* Fixing bug on dm9000_probe, takeover dm9000_reset(db),
- * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo
- * while probe stage.
- */
-
- iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
+ dm9000_reset(db);
/* try multiple times, DM9000 sometimes gets the read wrong */
for (i = 0; i < 8; i++) {
@@ -1695,8 +1709,8 @@ dm9000_drv_resume(struct device *dev)
/* reset if we were not in wake mode to ensure if
* the device was powered off it is in a known state */
if (!db->wake_state) {
- dm9000_reset(db);
dm9000_init_dm9000(ndev);
+ dm9000_unmask_interrupts(db);
}
netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index 1642de7..8616608 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1703,7 +1703,7 @@ static int tulip_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
#ifdef CONFIG_TULIP_NAPI
netif_napi_add(dev, &tp->napi, tulip_poll, 16);
#endif
- SET_ETHTOOL_OPS(dev, &ops);
+ dev->ethtool_ops = &ops;
if (register_netdev(dev))
goto err_out_free_ring;
diff --git a/drivers/net/ethernet/dec/tulip/uli526x.c b/drivers/net/ethernet/dec/tulip/uli526x.c
index aa801a6..80afec3 100644
--- a/drivers/net/ethernet/dec/tulip/uli526x.c
+++ b/drivers/net/ethernet/dec/tulip/uli526x.c
@@ -962,8 +962,8 @@ ULi_ethtool_gset(struct uli526x_board_info *db, struct ethtool_cmd *ecmd)
}
if(db->link_failed)
{
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
if (db->media_mode & ULI526X_AUTO)
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index 4fb756d..1274b6f 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -227,7 +227,7 @@ rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
}
dev->netdev_ops = &netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
#if 0
dev->features = NETIF_F_IP_CSUM;
#endif
@@ -1185,8 +1185,8 @@ static int rio_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, np->speed);
cmd->duplex = np->full_duplex ? DUPLEX_FULL : DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
if ( np->an_enable)
cmd->autoneg = AUTONEG_ENABLE;
diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c
index d9e5ca0..433c1e1 100644
--- a/drivers/net/ethernet/dlink/sundance.c
+++ b/drivers/net/ethernet/dlink/sundance.c
@@ -577,7 +577,7 @@ static int sundance_probe1(struct pci_dev *pdev,
/* The chip-specific entries in the device structure. */
dev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c
index 4884205..056b44b 100644
--- a/drivers/net/ethernet/ec_bhf.c
+++ b/drivers/net/ethernet/ec_bhf.c
@@ -134,17 +134,17 @@ struct ec_bhf_priv {
struct pci_dev *dev;
- void * __iomem io;
- void * __iomem dma_io;
+ void __iomem *io;
+ void __iomem *dma_io;
struct hrtimer hrtimer;
int tx_dma_chan;
int rx_dma_chan;
- void * __iomem ec_io;
- void * __iomem fifo_io;
- void * __iomem mii_io;
- void * __iomem mac_io;
+ void __iomem *ec_io;
+ void __iomem *fifo_io;
+ void __iomem *mii_io;
+ void __iomem *mac_io;
struct bhf_dma rx_buf;
struct rx_desc *rx_descs;
@@ -297,7 +297,7 @@ static int ec_bhf_setup_offsets(struct ec_bhf_priv *priv)
{
struct device *dev = PRIV_TO_DEV(priv);
unsigned block_count, i;
- void * __iomem ec_info;
+ void __iomem *ec_info;
dev_dbg(dev, "Info block:\n");
dev_dbg(dev, "Type of function: %x\n", (unsigned)ioread16(priv->io));
@@ -569,8 +569,8 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct net_device *net_dev;
struct ec_bhf_priv *priv;
- void * __iomem dma_io;
- void * __iomem io;
+ void __iomem *dma_io;
+ void __iomem *io;
int err = 0;
err = pci_enable_device(dev);
@@ -615,7 +615,7 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id)
}
net_dev = alloc_etherdev(sizeof(struct ec_bhf_priv));
- if (net_dev == 0) {
+ if (net_dev == NULL) {
err = -ENOMEM;
goto err_unmap_dma_io;
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 97db5a7..2e7c555 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -120,6 +120,9 @@ static inline char *nic_name(struct pci_dev *pdev)
#define MAX_VFS 30 /* Max VFs supported by BE3 FW */
#define FW_VER_LEN 32
+#define RSS_INDIR_TABLE_LEN 128
+#define RSS_HASH_KEY_LEN 40
+
struct be_dma_mem {
void *va;
dma_addr_t dma;
@@ -371,6 +374,7 @@ enum vf_state {
#define BE_FLAGS_LINK_STATUS_INIT 1
#define BE_FLAGS_WORKER_SCHEDULED (1 << 3)
#define BE_FLAGS_VLAN_PROMISC (1 << 4)
+#define BE_FLAGS_MCAST_PROMISC (1 << 5)
#define BE_FLAGS_NAPI_ENABLED (1 << 9)
#define BE_FLAGS_QNQ_ASYNC_EVT_RCVD (1 << 11)
#define BE_FLAGS_VXLAN_OFFLOADS (1 << 12)
@@ -409,6 +413,13 @@ struct be_resources {
u32 if_cap_flags;
};
+struct rss_info {
+ u64 rss_flags;
+ u8 rsstable[RSS_INDIR_TABLE_LEN];
+ u8 rss_queue[RSS_INDIR_TABLE_LEN];
+ u8 rss_hkey[RSS_HASH_KEY_LEN];
+};
+
struct be_adapter {
struct pci_dev *pdev;
struct net_device *netdev;
@@ -445,7 +456,7 @@ struct be_adapter {
struct be_drv_stats drv_stats;
struct be_aic_obj aic_obj[MAX_EVT_QS];
u16 vlans_added;
- u8 vlan_tag[VLAN_N_VID];
+ unsigned long vids[BITS_TO_LONGS(VLAN_N_VID)];
u8 vlan_prio_bmap; /* Available Priority BitMap */
u16 recommended_prio; /* Recommended Priority */
struct be_dma_mem rx_filter; /* Cmd DMA mem for rx-filter */
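Switching the VLAN bookkeeping from a per-VID u8 array to a bitmap shrinks it from VLAN_N_VID (4096) bytes to 4096 / 8 = 512 bytes and allows the standard bitops. A minimal sketch of the expected usage; the surrounding add/kill paths are assumed:

	set_bit(vid, adapter->vids);		/* ndo_vlan_rx_add_vid	*/
	clear_bit(vid, adapter->vids);		/* ndo_vlan_rx_kill_vid	*/
	if (test_bit(vid, adapter->vids))	/* when reprogramming	*/
		/* write vid into the HW VLAN filter table */;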
@@ -507,7 +518,7 @@ struct be_adapter {
u32 msg_enable;
int be_get_temp_freq;
u8 pf_number;
- u64 rss_flags;
+ struct rss_info rss_info;
};
#define be_physfn(adapter) (!adapter->virtfn)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index d1ec15a..f4ea349 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -52,8 +52,7 @@ static struct be_cmd_priv_map cmd_priv_map[] = {
}
};
-static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
- u8 subsystem)
+static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode, u8 subsystem)
{
int i;
int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
@@ -120,21 +119,28 @@ static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
return (void *)addr;
}
-static int be_mcc_compl_process(struct be_adapter *adapter,
- struct be_mcc_compl *compl)
+static bool be_skip_err_log(u8 opcode, u16 base_status, u16 addl_status)
{
- u16 compl_status, extd_status;
- struct be_cmd_resp_hdr *resp_hdr;
- u8 opcode = 0, subsystem = 0;
-
- /* Just swap the status to host endian; mcc tag is opaquely copied
- * from mcc_wrb */
- be_dws_le_to_cpu(compl, 4);
-
- compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
- CQE_STATUS_COMPL_MASK;
+ if (base_status == MCC_STATUS_NOT_SUPPORTED ||
+ base_status == MCC_STATUS_ILLEGAL_REQUEST ||
+ addl_status == MCC_ADDL_STATUS_TOO_MANY_INTERFACES ||
+ (opcode == OPCODE_COMMON_WRITE_FLASHROM &&
+ (base_status == MCC_STATUS_ILLEGAL_FIELD ||
+ addl_status == MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH)))
+ return true;
+ else
+ return false;
+}
- resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
+/* Placeholder for all the async MCC cmds wherein the caller is not in a busy
+ * loop (has not issued be_mcc_notify_wait())
+ */
+static void be_async_cmd_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl,
+ struct be_cmd_resp_hdr *resp_hdr)
+{
+ enum mcc_base_status base_status = base_status(compl->status);
+ u8 opcode = 0, subsystem = 0;
if (resp_hdr) {
opcode = resp_hdr->opcode;
@@ -144,61 +150,86 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
complete(&adapter->et_cmd_compl);
- return 0;
+ return;
}
- if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
- (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
- (subsystem == CMD_SUBSYSTEM_COMMON)) {
- adapter->flash_status = compl_status;
+ if ((opcode == OPCODE_COMMON_WRITE_FLASHROM ||
+ opcode == OPCODE_COMMON_WRITE_OBJECT) &&
+ subsystem == CMD_SUBSYSTEM_COMMON) {
+ adapter->flash_status = compl->status;
complete(&adapter->et_cmd_compl);
+ return;
}
- if (compl_status == MCC_STATUS_SUCCESS) {
- if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
- (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
- (subsystem == CMD_SUBSYSTEM_ETH)) {
- be_parse_stats(adapter);
- adapter->stats_cmd_sent = false;
- }
- if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
- subsystem == CMD_SUBSYSTEM_COMMON) {
+ if ((opcode == OPCODE_ETH_GET_STATISTICS ||
+ opcode == OPCODE_ETH_GET_PPORT_STATS) &&
+ subsystem == CMD_SUBSYSTEM_ETH &&
+ base_status == MCC_STATUS_SUCCESS) {
+ be_parse_stats(adapter);
+ adapter->stats_cmd_sent = false;
+ return;
+ }
+
+ if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
+ subsystem == CMD_SUBSYSTEM_COMMON) {
+ if (base_status == MCC_STATUS_SUCCESS) {
struct be_cmd_resp_get_cntl_addnl_attribs *resp =
- (void *)resp_hdr;
+ (void *)resp_hdr;
adapter->drv_stats.be_on_die_temperature =
- resp->on_die_temperature;
- }
- } else {
- if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
+ resp->on_die_temperature;
+ } else {
adapter->be_get_temp_freq = 0;
+ }
+ return;
+ }
+}
+
+static int be_mcc_compl_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ enum mcc_base_status base_status;
+ enum mcc_addl_status addl_status;
+ struct be_cmd_resp_hdr *resp_hdr;
+ u8 opcode = 0, subsystem = 0;
+
+ /* Just swap the status to host endian; mcc tag is opaquely copied
+ * from mcc_wrb */
+ be_dws_le_to_cpu(compl, 4);
+
+ base_status = base_status(compl->status);
+ addl_status = addl_status(compl->status);
+
+ resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
+ if (resp_hdr) {
+ opcode = resp_hdr->opcode;
+ subsystem = resp_hdr->subsystem;
+ }
+
+ be_async_cmd_process(adapter, compl, resp_hdr);
- if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
- compl_status == MCC_STATUS_ILLEGAL_REQUEST)
- goto done;
+ if (base_status != MCC_STATUS_SUCCESS &&
+ !be_skip_err_log(opcode, base_status, addl_status)) {
- if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
+ if (base_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
dev_warn(&adapter->pdev->dev,
"VF is not privileged to issue opcode %d-%d\n",
opcode, subsystem);
} else {
- extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
- CQE_STATUS_EXTD_MASK;
dev_err(&adapter->pdev->dev,
"opcode %d-%d failed:status %d-%d\n",
- opcode, subsystem, compl_status, extd_status);
-
- if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
- return extd_status;
+ opcode, subsystem, base_status, addl_status);
}
}
-done:
- return compl_status;
+ return compl->status;
}
/* Link state evt is a string of bytes; no need for endian swapping */
static void be_async_link_state_process(struct be_adapter *adapter,
- struct be_async_event_link_state *evt)
+ struct be_mcc_compl *compl)
{
+ struct be_async_event_link_state *evt =
+ (struct be_async_event_link_state *)compl;
+
/* When link status changes, link speed must be re-queried from FW */
adapter->phy.link_speed = -1;
@@ -221,8 +252,11 @@ static void be_async_link_state_process(struct be_adapter *adapter,
/* Grp5 CoS Priority evt */
static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
- struct be_async_event_grp5_cos_priority *evt)
+ struct be_mcc_compl *compl)
{
+ struct be_async_event_grp5_cos_priority *evt =
+ (struct be_async_event_grp5_cos_priority *)compl;
+
if (evt->valid) {
adapter->vlan_prio_bmap = evt->available_priority_bmap;
adapter->recommended_prio &= ~VLAN_PRIO_MASK;
@@ -233,8 +267,11 @@ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
/* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
- struct be_async_event_grp5_qos_link_speed *evt)
+ struct be_mcc_compl *compl)
{
+ struct be_async_event_grp5_qos_link_speed *evt =
+ (struct be_async_event_grp5_qos_link_speed *)compl;
+
if (adapter->phy.link_speed >= 0 &&
evt->physical_port == adapter->port_num)
adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
@@ -242,8 +279,11 @@ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
/*Grp5 PVID evt*/
static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
- struct be_async_event_grp5_pvid_state *evt)
+ struct be_mcc_compl *compl)
{
+ struct be_async_event_grp5_pvid_state *evt =
+ (struct be_async_event_grp5_pvid_state *)compl;
+
if (evt->enabled) {
adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
dev_info(&adapter->pdev->dev, "LPVID: %d\n", adapter->pvid);
@@ -253,26 +293,21 @@ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
}
static void be_async_grp5_evt_process(struct be_adapter *adapter,
- u32 trailer, struct be_mcc_compl *evt)
+ struct be_mcc_compl *compl)
{
- u8 event_type = 0;
-
- event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
- ASYNC_TRAILER_EVENT_TYPE_MASK;
+ u8 event_type = (compl->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+ ASYNC_EVENT_TYPE_MASK;
switch (event_type) {
case ASYNC_EVENT_COS_PRIORITY:
- be_async_grp5_cos_priority_process(adapter,
- (struct be_async_event_grp5_cos_priority *)evt);
- break;
+ be_async_grp5_cos_priority_process(adapter, compl);
+ break;
case ASYNC_EVENT_QOS_SPEED:
- be_async_grp5_qos_speed_process(adapter,
- (struct be_async_event_grp5_qos_link_speed *)evt);
- break;
+ be_async_grp5_qos_speed_process(adapter, compl);
+ break;
case ASYNC_EVENT_PVID_STATE:
- be_async_grp5_pvid_state_process(adapter,
- (struct be_async_event_grp5_pvid_state *)evt);
- break;
+ be_async_grp5_pvid_state_process(adapter, compl);
+ break;
default:
dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
event_type);
@@ -281,13 +316,13 @@ static void be_async_grp5_evt_process(struct be_adapter *adapter,
}
static void be_async_dbg_evt_process(struct be_adapter *adapter,
- u32 trailer, struct be_mcc_compl *cmp)
+ struct be_mcc_compl *cmp)
{
u8 event_type = 0;
struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
- event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
- ASYNC_TRAILER_EVENT_TYPE_MASK;
+ event_type = (cmp->flags >> ASYNC_EVENT_TYPE_SHIFT) &
+ ASYNC_EVENT_TYPE_MASK;
switch (event_type) {
case ASYNC_DEBUG_EVENT_TYPE_QNQ:
@@ -302,25 +337,33 @@ static void be_async_dbg_evt_process(struct be_adapter *adapter,
}
}
-static inline bool is_link_state_evt(u32 trailer)
+static inline bool is_link_state_evt(u32 flags)
{
- return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_LINK_STATE;
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_LINK_STATE;
}
-static inline bool is_grp5_evt(u32 trailer)
+static inline bool is_grp5_evt(u32 flags)
{
- return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_GRP_5);
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_GRP_5;
}
-static inline bool is_dbg_evt(u32 trailer)
+static inline bool is_dbg_evt(u32 flags)
{
- return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
- ASYNC_TRAILER_EVENT_CODE_MASK) ==
- ASYNC_EVENT_CODE_QNQ);
+ return ((flags >> ASYNC_EVENT_CODE_SHIFT) & ASYNC_EVENT_CODE_MASK) ==
+ ASYNC_EVENT_CODE_QNQ;
+}
+
+static void be_mcc_event_process(struct be_adapter *adapter,
+ struct be_mcc_compl *compl)
+{
+ if (is_link_state_evt(compl->flags))
+ be_async_link_state_process(adapter, compl);
+ else if (is_grp5_evt(compl->flags))
+ be_async_grp5_evt_process(adapter, compl);
+ else if (is_dbg_evt(compl->flags))
+ be_async_dbg_evt_process(adapter, compl);
}
static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
@@ -362,21 +405,13 @@ int be_process_mcc(struct be_adapter *adapter)
struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
spin_lock(&adapter->mcc_cq_lock);
+
while ((compl = be_mcc_compl_get(adapter))) {
if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
- /* Interpret flags as an async trailer */
- if (is_link_state_evt(compl->flags))
- be_async_link_state_process(adapter,
- (struct be_async_event_link_state *) compl);
- else if (is_grp5_evt(compl->flags))
- be_async_grp5_evt_process(adapter,
- compl->flags, compl);
- else if (is_dbg_evt(compl->flags))
- be_async_dbg_evt_process(adapter,
- compl->flags, compl);
+ be_mcc_event_process(adapter, compl);
} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
- status = be_mcc_compl_process(adapter, compl);
- atomic_dec(&mcc_obj->q.used);
+ status = be_mcc_compl_process(adapter, compl);
+ atomic_dec(&mcc_obj->q.used);
}
be_mcc_compl_use(compl);
num++;
@@ -436,7 +471,9 @@ static int be_mcc_notify_wait(struct be_adapter *adapter)
if (status == -EIO)
goto out;
- status = resp->status;
+ status = (resp->base_status |
+ ((resp->addl_status & CQE_ADDL_STATUS_MASK) <<
+ CQE_ADDL_STATUS_SHIFT));
out:
return status;
}
@@ -560,10 +597,8 @@ static bool lancer_provisioning_error(struct be_adapter *adapter)
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- sliport_err1 = ioread32(adapter->db +
- SLIPORT_ERROR1_OFFSET);
- sliport_err2 = ioread32(adapter->db +
- SLIPORT_ERROR2_OFFSET);
+ sliport_err1 = ioread32(adapter->db + SLIPORT_ERROR1_OFFSET);
+ sliport_err2 = ioread32(adapter->db + SLIPORT_ERROR2_OFFSET);
if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
@@ -630,8 +665,7 @@ int be_fw_wait_ready(struct be_adapter *adapter)
if (stage == POST_STAGE_ARMFW_RDY)
return 0;
- dev_info(dev, "Waiting for POST, %ds elapsed\n",
- timeout);
+ dev_info(dev, "Waiting for POST, %ds elapsed\n", timeout);
if (msleep_interruptible(2000)) {
dev_err(dev, "Waiting for POST aborted\n");
return -EINTR;
@@ -649,8 +683,7 @@ static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
return &wrb->payload.sgl[0];
}
-static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
- unsigned long addr)
+static inline void fill_wrb_tags(struct be_mcc_wrb *wrb, unsigned long addr)
{
wrb->tag0 = addr & 0xFFFFFFFF;
wrb->tag1 = upper_32_bits(addr);
@@ -659,8 +692,9 @@ static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
- u8 subsystem, u8 opcode, int cmd_len,
- struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
+ u8 subsystem, u8 opcode, int cmd_len,
+ struct be_mcc_wrb *wrb,
+ struct be_dma_mem *mem)
{
struct be_sge *sge;
@@ -683,7 +717,7 @@ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
}
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
- struct be_dma_mem *mem)
+ struct be_dma_mem *mem)
{
int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
u64 dma = (u64)mem->dma;
@@ -868,7 +902,8 @@ int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb,
+ NULL);
/* Support for EQ_CREATEv2 available only SH-R onwards */
if (!(BEx_chip(adapter) || lancer_chip(adapter)))
@@ -917,7 +952,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb,
+ NULL);
req->type = MAC_ADDRESS_TYPE_NETWORK;
if (permanent) {
req->permanent = 1;
@@ -940,7 +976,7 @@ err:
/* Uses synchronous MCCQ */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
- u32 if_id, u32 *pmac_id, u32 domain)
+ u32 if_id, u32 *pmac_id, u32 domain)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_pmac_add *req;
@@ -956,7 +992,8 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
req->if_id = cpu_to_le32(if_id);
@@ -1012,7 +1049,7 @@ err:
/* Uses Mbox */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
- struct be_queue_info *eq, bool no_delay, int coalesce_wm)
+ struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_cq_create *req;
@@ -1028,17 +1065,18 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
- coalesce_wm);
+ coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
- ctxt, no_delay);
+ ctxt, no_delay);
AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
- __ilog2_u32(cq->len/256));
+ __ilog2_u32(cq->len / 256));
AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
@@ -1053,14 +1091,12 @@ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
AMAP_SET_BITS(struct amap_cq_context_v2, coalescwm,
ctxt, coalesce_wm);
AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
- no_delay);
+ no_delay);
AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
- __ilog2_u32(cq->len/256));
+ __ilog2_u32(cq->len / 256));
AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
- ctxt, 1);
- AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
- ctxt, eq->id);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eventable, ctxt, 1);
+ AMAP_SET_BITS(struct amap_cq_context_v2, eqid, ctxt, eq->id);
}
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1088,8 +1124,8 @@ static u32 be_encoded_q_len(int q_len)
}
static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_ext_create *req;
@@ -1105,13 +1141,14 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
if (BEx_chip(adapter)) {
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
+ be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
} else {
req->hdr.version = 1;
@@ -1145,8 +1182,8 @@ static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
}
static int be_cmd_mccq_org_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq,
+ struct be_queue_info *cq)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_mcc_create *req;
@@ -1162,13 +1199,14 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb,
+ NULL);
req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
- be_encoded_q_len(mccq->len));
+ be_encoded_q_len(mccq->len));
AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
@@ -1187,8 +1225,7 @@ static int be_cmd_mccq_org_create(struct be_adapter *adapter,
}
int be_cmd_mccq_create(struct be_adapter *adapter,
- struct be_queue_info *mccq,
- struct be_queue_info *cq)
+ struct be_queue_info *mccq, struct be_queue_info *cq)
{
int status;
@@ -1213,7 +1250,7 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
+ OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
if (lancer_chip(adapter)) {
req->hdr.version = 1;
@@ -1250,8 +1287,8 @@ int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
/* Uses MCC */
int be_cmd_rxq_create(struct be_adapter *adapter,
- struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
- u32 if_id, u32 rss, u8 *rss_id)
+ struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
+ u32 if_id, u32 rss, u8 *rss_id)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_eth_rx_create *req;
@@ -1268,7 +1305,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
req->cq_id = cpu_to_le16(cq_id);
req->frag_size = fls(frag_size) - 1;
@@ -1295,7 +1332,7 @@ err:
* Uses Mbox
*/
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
- int queue_type)
+ int queue_type)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_q_destroy *req;
@@ -1334,7 +1371,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
}
be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
- NULL);
+ NULL);
req->id = cpu_to_le16(q->id);
status = be_mbox_notify_wait(adapter);
@@ -1361,7 +1398,7 @@ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
req->id = cpu_to_le16(q->id);
status = be_mcc_notify_wait(adapter);
@@ -1384,7 +1421,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
req = embedded_payload(&wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
+ OPCODE_COMMON_NTWK_INTERFACE_CREATE,
+ sizeof(*req), &wrb, NULL);
req->hdr.domain = domain;
req->capability_flags = cpu_to_le32(cap_flags);
req->enable_flags = cpu_to_le32(en_flags);
@@ -1422,7 +1460,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_INTERFACE_DESTROY,
+ sizeof(*req), wrb, NULL);
req->hdr.domain = domain;
req->interface_id = cpu_to_le32(interface_id);
@@ -1452,7 +1491,8 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
hdr = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
+ OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb,
+ nonemb_cmd);
/* version 1 of the cmd is supported on all chips except BE2 */
if (BE2_chip(adapter))
@@ -1472,7 +1512,7 @@ err:
/* Lancer Stats */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
@@ -1493,8 +1533,8 @@ int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
- nonemb_cmd);
+ OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size,
+ wrb, nonemb_cmd);
req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
req->cmd_params.params.reset_stats = 0;
@@ -1553,7 +1593,8 @@ int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_LINK_STATUS_QUERY,
+ sizeof(*req), wrb, NULL);
/* version 1 of the cmd is supported on all chips except BE2 */
if (!BE2_chip(adapter))
@@ -1598,8 +1639,8 @@ int be_cmd_get_die_temperature(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
- wrb, NULL);
+ OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES,
+ sizeof(*req), wrb, NULL);
be_mcc_notify(adapter);
@@ -1625,7 +1666,8 @@ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb,
+ NULL);
req->fat_operation = cpu_to_le32(QUERY_FAT);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -1655,8 +1697,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_fat_cmd.size,
- &get_fat_cmd.dma);
+ get_fat_cmd.size,
+ &get_fat_cmd.dma);
if (!get_fat_cmd.va) {
status = -ENOMEM;
dev_err(&adapter->pdev->dev,
@@ -1679,8 +1721,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
- &get_fat_cmd);
+ OPCODE_COMMON_MANAGE_FAT, payload_len,
+ wrb, &get_fat_cmd);
req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
req->read_log_offset = cpu_to_le32(log_offset);
@@ -1691,8 +1733,8 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
if (!status) {
struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
memcpy(buf + offset,
- resp->data_buffer,
- le32_to_cpu(resp->read_log_length));
+ resp->data_buffer,
+ le32_to_cpu(resp->read_log_length));
} else {
dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
goto err;
@@ -1702,14 +1744,13 @@ void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
}
err:
pci_free_consistent(adapter->pdev, get_fat_cmd.size,
- get_fat_cmd.va,
- get_fat_cmd.dma);
+ get_fat_cmd.va, get_fat_cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
}
/* Uses synchronous mcc */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
- char *fw_on_flash)
+ char *fw_on_flash)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_fw_version *req;
@@ -1726,7 +1767,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb,
+ NULL);
status = be_mcc_notify_wait(adapter);
if (!status) {
struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
@@ -1759,7 +1801,8 @@ int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *set_eqd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb,
+ NULL);
req->num_eq = cpu_to_le32(num);
for (i = 0; i < num; i++) {
@@ -1777,7 +1820,7 @@ err:
/* Uses synchronous mcc */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num, bool promiscuous)
+ u32 num)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_vlan_config *req;
@@ -1793,19 +1836,16 @@ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req),
+ wrb, NULL);
req->interface_id = if_id;
- req->promiscuous = promiscuous;
req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0;
req->num_vlan = num;
- if (!promiscuous) {
- memcpy(req->normal_vlan, vtag_array,
- req->num_vlan * sizeof(vtag_array[0]));
- }
+ memcpy(req->normal_vlan, vtag_array,
+ req->num_vlan * sizeof(vtag_array[0]));
status = be_mcc_notify_wait(adapter);
-
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -1827,18 +1867,19 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
}
memset(req, 0, sizeof(*req));
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
- wrb, mem);
+ OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
+ wrb, mem);
req->if_id = cpu_to_le32(adapter->if_handle);
if (flags & IFF_PROMISC) {
req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS);
if (value == ON)
- req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
- BE_IF_FLAGS_VLAN_PROMISCUOUS |
- BE_IF_FLAGS_MCAST_PROMISCUOUS);
+ req->if_flags =
+ cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
+ BE_IF_FLAGS_VLAN_PROMISCUOUS |
+ BE_IF_FLAGS_MCAST_PROMISCUOUS);
} else if (flags & IFF_ALLMULTI) {
req->if_flags_mask = req->if_flags =
cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
@@ -1867,7 +1908,7 @@ int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
}
if ((req->if_flags_mask & cpu_to_le32(be_if_cap_flags(adapter))) !=
- req->if_flags_mask) {
+ req->if_flags_mask) {
dev_warn(&adapter->pdev->dev,
"Cannot set rx filter flags 0x%x\n",
req->if_flags_mask);
@@ -1905,7 +1946,8 @@ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req),
+ wrb, NULL);
req->tx_flow_control = cpu_to_le16((u16)tx_fc);
req->rx_flow_control = cpu_to_le16((u16)rx_fc);
@@ -1938,7 +1980,8 @@ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req),
+ wrb, NULL);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -1968,7 +2011,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_QUERY_FIRMWARE_CONFIG,
+ sizeof(*req), wrb, NULL);
status = be_mbox_notify_wait(adapter);
if (!status) {
@@ -2011,7 +2055,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb,
+ NULL);
status = be_mbox_notify_wait(adapter);
@@ -2020,47 +2065,47 @@ int be_cmd_reset_function(struct be_adapter *adapter)
}
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u32 rss_hash_opts, u16 table_size)
+ u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_rss_config *req;
- u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
- 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
- 0x3ea83c02, 0x4a110304};
int status;
if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
return 0;
- if (mutex_lock_interruptible(&adapter->mbox_lock))
- return -1;
+ spin_lock_bh(&adapter->mcc_lock);
- wrb = wrb_from_mbox(adapter);
+ wrb = wrb_from_mccq(adapter);
+ if (!wrb) {
+ status = -EBUSY;
+ goto err;
+ }
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
req->if_id = cpu_to_le32(adapter->if_handle);
req->enable_rss = cpu_to_le16(rss_hash_opts);
req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
- if (lancer_chip(adapter) || skyhawk_chip(adapter))
+ if (!BEx_chip(adapter))
req->hdr.version = 1;
memcpy(req->cpu_table, rsstable, table_size);
- memcpy(req->hash, myhash, sizeof(myhash));
+ memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN);
be_dws_cpu_to_le(req->hash, sizeof(req->hash));
- status = be_mbox_notify_wait(adapter);
-
- mutex_unlock(&adapter->mbox_lock);
+ status = be_mcc_notify_wait(adapter);
+err:
+ spin_unlock_bh(&adapter->mcc_lock);
return status;
}
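be_cmd_rss_config() now runs on the MCC queue under mcc_lock instead of the mailbox, and takes the hash key from the caller rather than the previously hard-coded table. A minimal caller sketch under the new signature; only the signature and the RSS_HASH_KEY_LEN/RSS_INDIR_TABLE_LEN names come from this patch, the get_random_bytes() key generation and the rsstable/rss_flags locals are assumed:

u8 rss_key[RSS_HASH_KEY_LEN];
int status;

/* rsstable and rss_flags prepared as in be_set_rss_hash_opts() */
get_random_bytes(rss_key, RSS_HASH_KEY_LEN);
status = be_cmd_rss_config(adapter, rsstable, rss_flags,
			   RSS_INDIR_TABLE_LEN, rss_key);
if (status)
	dev_err(&adapter->pdev->dev, "RSS config failed\n");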
/* Uses sync mcc */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
- u8 bcn, u8 sts, u8 state)
+ u8 bcn, u8 sts, u8 state)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_enable_disable_beacon *req;
@@ -2076,7 +2121,8 @@ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_ENABLE_DISABLE_BEACON,
+ sizeof(*req), wrb, NULL);
req->port_num = port_num;
req->beacon_state = state;
@@ -2107,7 +2153,8 @@ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req),
+ wrb, NULL);
req->port_num = port_num;
@@ -2146,20 +2193,20 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_WRITE_OBJECT,
- sizeof(struct lancer_cmd_req_write_object), wrb,
- NULL);
+ OPCODE_COMMON_WRITE_OBJECT,
+ sizeof(struct lancer_cmd_req_write_object), wrb,
+ NULL);
ctxt = &req->context;
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- write_length, ctxt, data_size);
+ write_length, ctxt, data_size);
if (data_size == 0)
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- eof, ctxt, 1);
+ eof, ctxt, 1);
else
AMAP_SET_BITS(struct amap_lancer_write_obj_context,
- eof, ctxt, 0);
+ eof, ctxt, 0);
be_dws_cpu_to_le(ctxt, sizeof(req->context));
req->write_offset = cpu_to_le32(data_offset);
@@ -2167,8 +2214,8 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req->descriptor_count = cpu_to_le32(1);
req->buf_len = cpu_to_le32(data_size);
req->addr_low = cpu_to_le32((cmd->dma +
- sizeof(struct lancer_cmd_req_write_object))
- & 0xFFFFFFFF);
+ sizeof(struct lancer_cmd_req_write_object))
+ & 0xFFFFFFFF);
req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
sizeof(struct lancer_cmd_req_write_object)));
@@ -2197,8 +2244,8 @@ err_unlock:
}
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 data_size, u32 data_offset, const char *obj_name,
- u32 *data_read, u32 *eof, u8 *addn_status)
+ u32 data_size, u32 data_offset, const char *obj_name,
+ u32 *data_read, u32 *eof, u8 *addn_status)
{
struct be_mcc_wrb *wrb;
struct lancer_cmd_req_read_object *req;
@@ -2216,9 +2263,9 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_READ_OBJECT,
- sizeof(struct lancer_cmd_req_read_object), wrb,
- NULL);
+ OPCODE_COMMON_READ_OBJECT,
+ sizeof(struct lancer_cmd_req_read_object), wrb,
+ NULL);
req->desired_read_len = cpu_to_le32(data_size);
req->read_offset = cpu_to_le32(data_offset);
@@ -2244,7 +2291,7 @@ err_unlock:
}
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
- u32 flash_type, u32 flash_opcode, u32 buf_size)
+ u32 flash_type, u32 flash_opcode, u32 buf_size)
{
struct be_mcc_wrb *wrb;
struct be_cmd_write_flashrom *req;
@@ -2261,7 +2308,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
+ OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb,
+ cmd);
req->params.op_type = cpu_to_le32(flash_type);
req->params.op_code = cpu_to_le32(flash_opcode);
@@ -2284,7 +2332,7 @@ err_unlock:
}
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- int offset)
+ u16 optype, int offset)
{
struct be_mcc_wrb *wrb;
struct be_cmd_read_flash_crc *req;
@@ -2303,7 +2351,7 @@ int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
wrb, NULL);
- req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
+ req->params.op_type = cpu_to_le32(optype);
req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
req->params.offset = cpu_to_le32(offset);
req->params.data_buf_size = cpu_to_le32(0x4);
@@ -2318,7 +2366,7 @@ err:
}
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_acpi_wol_magic_config *req;
@@ -2334,8 +2382,8 @@ int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
- OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
- nonemb_cmd);
+ OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req),
+ wrb, nonemb_cmd);
memcpy(req->magic_mac, mac, ETH_ALEN);
status = be_mcc_notify_wait(adapter);
@@ -2363,8 +2411,8 @@ int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
- NULL);
+ OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req),
+ wrb, NULL);
req->src_port = port_num;
req->dest_port = port_num;
@@ -2378,7 +2426,8 @@ err:
}
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
- u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
+ u32 loopback_type, u32 pkt_size, u32 num_pkts,
+ u64 pattern)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_loopback_test *req;
@@ -2396,7 +2445,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
+ OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb,
+ NULL);
req->hdr.timeout = cpu_to_le32(15);
req->pattern = cpu_to_le64(pattern);
@@ -2421,7 +2471,7 @@ err:
}
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
- u32 byte_cnt, struct be_dma_mem *cmd)
+ u32 byte_cnt, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_ddrdma_test *req;
@@ -2437,7 +2487,8 @@ int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
}
req = cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
- OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
+ OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb,
+ cmd);
req->pattern = cpu_to_le64(pattern);
req->byte_count = cpu_to_le32(byte_cnt);
@@ -2465,7 +2516,7 @@ err:
}
int be_cmd_get_seeprom_data(struct be_adapter *adapter,
- struct be_dma_mem *nonemb_cmd)
+ struct be_dma_mem *nonemb_cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_seeprom_read *req;
@@ -2481,8 +2532,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
req = nonemb_cmd->va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
- nonemb_cmd);
+ OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
+ nonemb_cmd);
status = be_mcc_notify_wait(adapter);
@@ -2510,8 +2561,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
goto err;
}
cmd.size = sizeof(struct be_cmd_req_get_phy_info);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -2521,8 +2571,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
req = cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
- wrb, &cmd);
+ OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
+ wrb, &cmd);
status = be_mcc_notify_wait(adapter);
if (!status) {
@@ -2544,8 +2594,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
BE_SUPPORTED_SPEED_1GBPS;
}
}
- pci_free_consistent(adapter->pdev, cmd.size,
- cmd.va, cmd.dma);
+ pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
@@ -2568,7 +2617,7 @@ int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
req->hdr.domain = domain;
req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
@@ -2597,10 +2646,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
- &attribs_cmd.dma);
+ &attribs_cmd.dma);
if (!attribs_cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure\n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
goto err;
}
@@ -2613,8 +2661,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
req = attribs_cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
- &attribs_cmd);
+ OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len,
+ wrb, &attribs_cmd);
status = be_mbox_notify_wait(adapter);
if (!status) {
@@ -2649,7 +2697,8 @@ int be_cmd_req_native_mode(struct be_adapter *adapter)
req = embedded_payload(wrb);
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP,
+ sizeof(*req), wrb, NULL);
req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
CAPABILITY_BE3_NATIVE_ERX_API);
@@ -2762,12 +2811,12 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
- get_mac_list_cmd.size,
- &get_mac_list_cmd.dma);
+ get_mac_list_cmd.size,
+ &get_mac_list_cmd.dma);
if (!get_mac_list_cmd.va) {
dev_err(&adapter->pdev->dev,
- "Memory allocation failure during GET_MAC_LIST\n");
+ "Memory allocation failure during GET_MAC_LIST\n");
return -ENOMEM;
}
@@ -2831,18 +2880,18 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
/* If no active mac_id is found, return the first mac addr */
*pmac_id_valid = false;
memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
- ETH_ALEN);
+ ETH_ALEN);
}
out:
spin_unlock_bh(&adapter->mcc_lock);
pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
- get_mac_list_cmd.va, get_mac_list_cmd.dma);
+ get_mac_list_cmd.va, get_mac_list_cmd.dma);
return status;
}
-int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac,
- u32 if_handle, bool active, u32 domain)
+int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id,
+ u8 *mac, u32 if_handle, bool active, u32 domain)
{
if (!active)
@@ -2892,7 +2941,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_req_set_mac_list);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
- &cmd.dma, GFP_KERNEL);
+ &cmd.dma, GFP_KERNEL);
if (!cmd.va)
return -ENOMEM;
@@ -2906,8 +2955,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
req = cmd.va;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
- wrb, &cmd);
+ OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
+ wrb, &cmd);
req->hdr.domain = domain;
req->mac_count = mac_count;
@@ -2917,8 +2966,7 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
status = be_mcc_notify_wait(adapter);
err:
- dma_free_coherent(&adapter->pdev->dev, cmd.size,
- cmd.va, cmd.dma);
+ dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
spin_unlock_bh(&adapter->mcc_lock);
return status;
}
@@ -2963,7 +3011,8 @@ int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
@@ -3009,7 +3058,8 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
ctxt = &req->context;
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
- OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
+ OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb,
+ NULL);
req->hdr.domain = domain;
AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
@@ -3027,10 +3077,9 @@ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
if (!status) {
struct be_cmd_resp_get_hsw_config *resp =
embedded_payload(wrb);
- be_dws_le_to_cpu(&resp->context,
- sizeof(resp->context));
+ be_dws_le_to_cpu(&resp->context, sizeof(resp->context));
vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
- pvid, &resp->context);
+ pvid, &resp->context);
if (pvid)
*pvid = le16_to_cpu(vid);
if (mode)
@@ -3062,11 +3111,9 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
- dev_err(&adapter->pdev->dev,
- "Memory allocation failure\n");
+ dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
status = -ENOMEM;
goto err;
}
@@ -3349,8 +3396,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
memset(&cmd, 0, sizeof(struct be_dma_mem));
cmd.size = sizeof(struct be_cmd_resp_get_func_config);
- cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
- &cmd.dma);
+ cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
if (!cmd.va) {
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
status = -ENOMEM;
@@ -3396,7 +3442,7 @@ err:
/* Uses mbox */
static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3424,7 +3470,7 @@ static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
/* Uses sync mcc */
static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
- u8 domain, struct be_dma_mem *cmd)
+ u8 domain, struct be_dma_mem *cmd)
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_get_profile_config *req;
@@ -3484,8 +3530,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
resp = cmd.va;
desc_count = le32_to_cpu(resp->desc_count);
- pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
- desc_count);
+ pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
+ desc_count);
if (pcie)
res->max_vfs = le16_to_cpu(pcie->num_vfs);
@@ -3548,33 +3594,47 @@ void be_reset_nic_desc(struct be_nic_res_desc *nic)
nic->cq_count = 0xFFFF;
nic->toe_conn_count = 0xFFFF;
nic->eq_count = 0xFFFF;
+ nic->iface_count = 0xFFFF;
nic->link_param = 0xFF;
+ nic->channel_id_param = cpu_to_le16(0xF000);
nic->acpi_params = 0xFF;
nic->wol_param = 0x0F;
- nic->bw_min = 0xFFFFFFFF;
+ nic->tunnel_iface_count = 0xFFFF;
+ nic->direct_tenant_iface_count = 0xFFFF;
nic->bw_max = 0xFFFFFFFF;
}
-int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain)
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
+ u8 domain)
{
- if (lancer_chip(adapter)) {
- struct be_nic_res_desc nic_desc;
+ struct be_nic_res_desc nic_desc;
+ u32 bw_percent;
+ u16 version = 0;
+
+ if (BE3_chip(adapter))
+ return be_cmd_set_qos(adapter, max_rate / 10, domain);
- be_reset_nic_desc(&nic_desc);
+ be_reset_nic_desc(&nic_desc);
+ nic_desc.pf_num = adapter->pf_number;
+ nic_desc.vf_num = domain;
+ if (lancer_chip(adapter)) {
nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
nic_desc.flags = (1 << QUN_SHIFT) | (1 << IMM_SHIFT) |
(1 << NOSV_SHIFT);
- nic_desc.pf_num = adapter->pf_number;
- nic_desc.vf_num = domain;
- nic_desc.bw_max = cpu_to_le32(bps);
-
- return be_cmd_set_profile_config(adapter, &nic_desc,
- RESOURCE_DESC_SIZE_V0,
- 0, domain);
+ nic_desc.bw_max = cpu_to_le32(max_rate / 10);
} else {
- return be_cmd_set_qos(adapter, bps, domain);
+ version = 1;
+ nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
+ nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+ nic_desc.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+ bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;
+ nic_desc.bw_max = cpu_to_le32(bw_percent);
}
+
+ return be_cmd_set_profile_config(adapter, &nic_desc,
+ nic_desc.hdr.desc_len,
+ version, domain);
}
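QoS configuration now takes an absolute max_rate plus the link speed: BE3 keeps the old SET_QOS path, Lancer programs bw_max directly (both apparently in units of 10 Mbps), and the remaining chips convert the rate to a percentage of link speed via a v1 NIC resource descriptor. The percentage math, worked through on illustrative numbers:

/* Illustrative only: cap a VF at 4000 Mbps on a 10 Gbps link */
u32 max_rate = 4000, link_speed = 10000;
u32 bw_percent = max_rate ? (max_rate * 100) / link_speed : 100;	/* = 40 */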
int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
@@ -3859,7 +3919,7 @@ err:
}
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
- int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
+ int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
struct be_adapter *adapter = netdev_priv(netdev_handle);
struct be_mcc_wrb *wrb;
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index b60e4d5..3e0a6b2 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -50,7 +50,7 @@ struct be_mcc_wrb {
#define CQE_FLAGS_CONSUMED_MASK (1 << 27)
/* Completion Status */
-enum {
+enum mcc_base_status {
MCC_STATUS_SUCCESS = 0,
MCC_STATUS_FAILED = 1,
MCC_STATUS_ILLEGAL_REQUEST = 2,
@@ -60,12 +60,25 @@ enum {
MCC_STATUS_NOT_SUPPORTED = 66
};
-#define MCC_ADDL_STS_INSUFFICIENT_RESOURCES 0x16
+/* Additional status */
+enum mcc_addl_status {
+ MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES = 0x16,
+ MCC_ADDL_STATUS_FLASH_IMAGE_CRC_MISMATCH = 0x4d,
+ MCC_ADDL_STATUS_TOO_MANY_INTERFACES = 0x4a
+};
+
+#define CQE_BASE_STATUS_MASK 0xFFFF
+#define CQE_BASE_STATUS_SHIFT 0 /* bits 0 - 15 */
+#define CQE_ADDL_STATUS_MASK 0xFF
+#define CQE_ADDL_STATUS_SHIFT 16 /* bits 16 - 31 */
-#define CQE_STATUS_COMPL_MASK 0xFFFF
-#define CQE_STATUS_COMPL_SHIFT 0 /* bits 0 - 15 */
-#define CQE_STATUS_EXTD_MASK 0xFFFF
-#define CQE_STATUS_EXTD_SHIFT 16 /* bits 16 - 31 */
+#define base_status(status) \
+ ((enum mcc_base_status) \
+ (status > 0 ? (status & CQE_BASE_STATUS_MASK) : 0))
+#define addl_status(status) \
+ ((enum mcc_addl_status) \
+ (status > 0 ? (status >> CQE_ADDL_STATUS_SHIFT) & \
+ CQE_ADDL_STATUS_MASK : 0))
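With the packed encoding, callers extract the two halves through these helpers instead of open-coded shifts; the status > 0 guard keeps negative Linux error codes from being misparsed as CQE fields. For example (an adapter in scope is assumed):

int status = be_mcc_notify_wait(adapter);

if (base_status(status) == MCC_STATUS_SUCCESS)
	return 0;
if (addl_status(status) == MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
	dev_warn(&adapter->pdev->dev, "out of resources\n");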
struct be_mcc_compl {
u32 status; /* dword 0 */
@@ -74,13 +87,13 @@ struct be_mcc_compl {
u32 flags; /* dword 3 */
};
-/* When the async bit of mcc_compl is set, the last 4 bytes of
- * mcc_compl is interpreted as follows:
+/* When the async bit of mcc_compl flags is set, flags
+ * is interpreted as follows:
*/
-#define ASYNC_TRAILER_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
-#define ASYNC_TRAILER_EVENT_CODE_MASK 0xFF
-#define ASYNC_TRAILER_EVENT_TYPE_SHIFT 16
-#define ASYNC_TRAILER_EVENT_TYPE_MASK 0xFF
+#define ASYNC_EVENT_CODE_SHIFT 8 /* bits 8 - 15 */
+#define ASYNC_EVENT_CODE_MASK 0xFF
+#define ASYNC_EVENT_TYPE_SHIFT 16
+#define ASYNC_EVENT_TYPE_MASK 0xFF
#define ASYNC_EVENT_CODE_LINK_STATE 0x1
#define ASYNC_EVENT_CODE_GRP_5 0x5
#define ASYNC_EVENT_QOS_SPEED 0x1
@@ -89,10 +102,6 @@ struct be_mcc_compl {
#define ASYNC_EVENT_CODE_QNQ 0x6
#define ASYNC_DEBUG_EVENT_TYPE_QNQ 1
-struct be_async_event_trailer {
- u32 code;
-};
-
enum {
LINK_DOWN = 0x0,
LINK_UP = 0x1
@@ -100,7 +109,7 @@ enum {
#define LINK_STATUS_MASK 0x1
#define LOGICAL_LINK_STATUS_MASK 0x2
-/* When the event code of an async trailer is link-state, the mcc_compl
+/* When the event code of compl->flags is link-state, the mcc_compl
* must be interpreted as follows
*/
struct be_async_event_link_state {
@@ -110,10 +119,10 @@ struct be_async_event_link_state {
u8 port_speed;
u8 port_fault;
u8 rsvd0[7];
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
-/* When the event code of an async trailer is GRP-5 and event_type is QOS_SPEED
+/* When the event code of compl->flags is GRP-5 and event_type is QOS_SPEED
* the mcc_compl must be interpreted as follows
*/
struct be_async_event_grp5_qos_link_speed {
@@ -121,10 +130,10 @@ struct be_async_event_grp5_qos_link_speed {
u8 rsvd[5];
u16 qos_link_speed;
u32 event_tag;
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
-/* When the event code of an async trailer is GRP5 and event type is
+/* When the event code of compl->flags is GRP5 and event type is
* CoS-Priority, the mcc_compl must be interpreted as follows
*/
struct be_async_event_grp5_cos_priority {
@@ -134,10 +143,10 @@ struct be_async_event_grp5_cos_priority {
u8 valid;
u8 rsvd0;
u8 event_tag;
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
-/* When the event code of an async trailer is GRP5 and event type is
+/* When the event code of compl->flags is GRP5 and event type is
* PVID state, the mcc_compl must be interpreted as follows
*/
struct be_async_event_grp5_pvid_state {
@@ -146,7 +155,7 @@ struct be_async_event_grp5_pvid_state {
u16 tag;
u32 event_tag;
u32 rsvd1;
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
/* async event indicating outer VLAN tag in QnQ */
@@ -156,7 +165,7 @@ struct be_async_event_qnq {
u16 vlan_tag;
u32 event_tag;
u8 rsvd1[4];
- struct be_async_event_trailer trailer;
+ u32 flags;
} __packed;
struct be_mcc_mailbox {
@@ -258,8 +267,8 @@ struct be_cmd_resp_hdr {
u8 opcode; /* dword 0 */
u8 subsystem; /* dword 0 */
u8 rsvd[2]; /* dword 0 */
- u8 status; /* dword 1 */
- u8 add_status; /* dword 1 */
+ u8 base_status; /* dword 1 */
+ u8 addl_status; /* dword 1 */
u8 rsvd1[2]; /* dword 1 */
u32 response_length; /* dword 2 */
u32 actual_resp_len; /* dword 3 */
@@ -1186,7 +1195,8 @@ struct be_cmd_read_flash_crc {
struct flashrom_params params;
u8 crc[4];
u8 rsvd[4];
-};
+} __packed;
+
/**************** Lancer Firmware Flash ************/
struct amap_lancer_write_obj_context {
u8 write_length[24];
@@ -1891,16 +1901,20 @@ struct be_nic_res_desc {
u16 cq_count;
u16 toe_conn_count;
u16 eq_count;
- u32 rsvd5;
+ u16 vlan_id;
+ u16 iface_count;
u32 cap_flags;
u8 link_param;
- u8 rsvd6[3];
+ u8 rsvd6;
+ u16 channel_id_param;
u32 bw_min;
u32 bw_max;
u8 acpi_params;
u8 wol_param;
u16 rsvd7;
- u32 rsvd8[7];
+ u16 tunnel_iface_count;
+ u16 direct_tenant_iface_count;
+ u32 rsvd8[6];
} __packed;
/************ Multi-Channel type ***********/
@@ -2060,7 +2074,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
char *fw_on_flash);
int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
- u32 num, bool promiscuous);
+ u32 num);
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
@@ -2068,7 +2082,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
u32 *function_mode, u32 *function_caps, u16 *asic_rev);
int be_cmd_reset_function(struct be_adapter *adapter);
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
- u32 rss_hash_opts, u16 table_size);
+ u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey);
int be_process_mcc(struct be_adapter *adapter);
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 beacon,
u8 status, u8 state);
@@ -2084,7 +2098,7 @@ int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
u32 data_size, u32 data_offset, const char *obj_name,
u32 *data_read, u32 *eof, u8 *addn_status);
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
- int offset);
+ u16 optype, int offset);
int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
struct be_dma_mem *nonemb_cmd);
int be_cmd_fw_init(struct be_adapter *adapter);
@@ -2101,7 +2115,8 @@ int be_cmd_get_seeprom_data(struct be_adapter *adapter,
int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
u8 loopback_type, u8 enable);
int be_cmd_get_phy_info(struct be_adapter *adapter);
-int be_cmd_config_qos(struct be_adapter *adapter, u32 bps, u8 domain);
+int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate,
+ u16 link_speed, u8 domain);
void be_detect_error(struct be_adapter *adapter);
int be_cmd_get_die_temperature(struct be_adapter *adapter);
int be_cmd_get_cntl_attributes(struct be_adapter *adapter);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 15ba96c..e2da4d2 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -132,6 +132,7 @@ static const struct be_ethtool_stat et_rx_stats[] = {
{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
{DRVSTAT_RX_INFO(rx_compl)},
+ {DRVSTAT_RX_INFO(rx_compl_err)},
{DRVSTAT_RX_INFO(rx_mcast_pkts)},
/* Number of page allocation failures while posting receive buffers
* to HW.
@@ -181,7 +182,7 @@ static const char et_self_tests[][ETH_GSTRING_LEN] = {
#define BE_NO_LOOPBACK 0xff
static void be_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+ struct ethtool_drvinfo *drvinfo)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -201,8 +202,7 @@ static void be_get_drvinfo(struct net_device *netdev,
drvinfo->eedump_len = 0;
}
-static u32
-lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
+static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
{
u32 data_read = 0, eof;
u8 addn_status;
@@ -212,14 +212,14 @@ lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name)
memset(&data_len_cmd, 0, sizeof(data_len_cmd));
/* data_offset and data_size should be 0 to get reg len */
status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0,
- file_name, &data_read, &eof, &addn_status);
+ file_name, &data_read, &eof,
+ &addn_status);
return data_read;
}
-static int
-lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
- u32 buf_len, void *buf)
+static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
+ u32 buf_len, void *buf)
{
struct be_dma_mem read_cmd;
u32 read_len = 0, total_read_len = 0, chunk_size;
@@ -229,11 +229,11 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
read_cmd.size = LANCER_READ_FILE_CHUNK;
read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
- &read_cmd.dma);
+ &read_cmd.dma);
if (!read_cmd.va) {
dev_err(&adapter->pdev->dev,
- "Memory allocation failure while reading dump\n");
+ "Memory allocation failure while reading dump\n");
return -ENOMEM;
}
@@ -242,8 +242,8 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
LANCER_READ_FILE_CHUNK);
chunk_size = ALIGN(chunk_size, 4);
status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size,
- total_read_len, file_name, &read_len,
- &eof, &addn_status);
+ total_read_len, file_name,
+ &read_len, &eof, &addn_status);
if (!status) {
memcpy(buf + total_read_len, read_cmd.va, read_len);
total_read_len += read_len;
@@ -254,13 +254,12 @@ lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
}
}
pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
- read_cmd.dma);
+ read_cmd.dma);
return status;
}
-static int
-be_get_reg_len(struct net_device *netdev)
+static int be_get_reg_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
u32 log_size = 0;
@@ -271,7 +270,7 @@ be_get_reg_len(struct net_device *netdev)
if (be_physfn(adapter)) {
if (lancer_chip(adapter))
log_size = lancer_cmd_get_file_len(adapter,
- LANCER_FW_DUMP_FILE);
+ LANCER_FW_DUMP_FILE);
else
be_cmd_get_reg_len(adapter, &log_size);
}
@@ -287,7 +286,7 @@ be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf)
memset(buf, 0, regs->len);
if (lancer_chip(adapter))
lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE,
- regs->len, buf);
+ regs->len, buf);
else
be_cmd_get_regs(adapter, regs->len, buf);
}
@@ -337,9 +336,8 @@ static int be_set_coalesce(struct net_device *netdev,
return 0;
}
-static void
-be_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, uint64_t *data)
+static void be_get_ethtool_stats(struct net_device *netdev,
+ struct ethtool_stats *stats, uint64_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_rx_obj *rxo;
@@ -390,9 +388,8 @@ be_get_ethtool_stats(struct net_device *netdev,
}
}
-static void
-be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
- uint8_t *data)
+static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
+ uint8_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
int i, j;
@@ -642,16 +639,15 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
adapter->rx_fc = ecmd->rx_pause;
status = be_cmd_set_flow_control(adapter,
- adapter->tx_fc, adapter->rx_fc);
+ adapter->tx_fc, adapter->rx_fc);
if (status)
dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
return status;
}
-static int
-be_set_phys_id(struct net_device *netdev,
- enum ethtool_phys_id_state state)
+static int be_set_phys_id(struct net_device *netdev,
+ enum ethtool_phys_id_state state)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -708,8 +704,7 @@ static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
return status;
}
-static void
-be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -723,8 +718,7 @@ be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
memset(&wol->sopass, 0, sizeof(wol->sopass));
}
-static int
-be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
+static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -744,8 +738,7 @@ be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
return 0;
}
-static int
-be_test_ddr_dma(struct be_adapter *adapter)
+static int be_test_ddr_dma(struct be_adapter *adapter)
{
int ret, i;
struct be_dma_mem ddrdma_cmd;
@@ -761,7 +754,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
for (i = 0; i < 2; i++) {
ret = be_cmd_ddr_dma_test(adapter, pattern[i],
- 4096, &ddrdma_cmd);
+ 4096, &ddrdma_cmd);
if (ret != 0)
goto err;
}
@@ -773,20 +766,17 @@ err:
}
static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
- u64 *status)
+ u64 *status)
{
- be_cmd_set_loopback(adapter, adapter->hba_port_num,
- loopback_type, 1);
+ be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1);
*status = be_cmd_loopback_test(adapter, adapter->hba_port_num,
- loopback_type, 1500,
- 2, 0xabc);
- be_cmd_set_loopback(adapter, adapter->hba_port_num,
- BE_NO_LOOPBACK, 1);
+ loopback_type, 1500, 2, 0xabc);
+ be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1);
return *status;
}
-static void
-be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
+static void be_self_test(struct net_device *netdev, struct ethtool_test *test,
+ u64 *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
int status;
@@ -801,12 +791,10 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM);
if (test->flags & ETH_TEST_FL_OFFLINE) {
- if (be_loopback_test(adapter, BE_MAC_LOOPBACK,
- &data[0]) != 0)
+ if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
- if (be_loopback_test(adapter, BE_PHY_LOOPBACK,
- &data[1]) != 0)
+ if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0)
test->flags |= ETH_TEST_FL_FAILED;
if (test->flags & ETH_TEST_FL_EXTERNAL_LB) {
@@ -832,16 +820,14 @@ be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data)
}
}
-static int
-be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
+static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
{
struct be_adapter *adapter = netdev_priv(netdev);
return be_load_fw(adapter, efl->data);
}
-static int
-be_get_eeprom_len(struct net_device *netdev)
+static int be_get_eeprom_len(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -851,18 +837,17 @@ be_get_eeprom_len(struct net_device *netdev)
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_get_file_len(adapter,
- LANCER_VPD_PF_FILE);
+ LANCER_VPD_PF_FILE);
else
return lancer_cmd_get_file_len(adapter,
- LANCER_VPD_VF_FILE);
+ LANCER_VPD_VF_FILE);
} else {
return BE_READ_SEEPROM_LEN;
}
}
-static int
-be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
- uint8_t *data)
+static int be_read_eeprom(struct net_device *netdev,
+ struct ethtool_eeprom *eeprom, uint8_t *data)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_dma_mem eeprom_cmd;
@@ -875,10 +860,10 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
if (lancer_chip(adapter)) {
if (be_physfn(adapter))
return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE,
- eeprom->len, data);
+ eeprom->len, data);
else
return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE,
- eeprom->len, data);
+ eeprom->len, data);
}
eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16);
@@ -933,27 +918,27 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
switch (flow_type) {
case TCP_V4_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV4)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V4_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV4)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV4)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case TCP_V6_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_TCP_IPV6)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
case UDP_V6_FLOW:
- if (adapter->rss_flags & RSS_ENABLE_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_IPV6)
data |= RXH_IP_DST | RXH_IP_SRC;
- if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6)
+ if (adapter->rss_info.rss_flags & RSS_ENABLE_UDP_IPV6)
data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
}
@@ -962,7 +947,7 @@ static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type)
}
static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
- u32 *rule_locs)
+ u32 *rule_locs)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -992,7 +977,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
struct be_rx_obj *rxo;
int status = 0, i, j;
u8 rsstable[128];
- u32 rss_flags = adapter->rss_flags;
+ u32 rss_flags = adapter->rss_info.rss_flags;
if (cmd->data != L3_RSS_FLAGS &&
cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS))
@@ -1039,7 +1024,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
return -EINVAL;
}
- if (rss_flags == adapter->rss_flags)
+ if (rss_flags == adapter->rss_info.rss_flags)
return status;
if (be_multi_rxq(adapter)) {
@@ -1051,9 +1036,11 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
}
}
}
- status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128);
+
+ status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
+ rss_flags, 128, adapter->rss_info.rss_hkey);
if (!status)
- adapter->rss_flags = rss_flags;
+ adapter->rss_info.rss_flags = rss_flags;
return status;
}
@@ -1103,6 +1090,69 @@ static int be_set_channels(struct net_device *netdev,
return be_update_queues(adapter);
}
+static u32 be_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return RSS_INDIR_TABLE_LEN;
+}
+
+static u32 be_get_rxfh_key_size(struct net_device *netdev)
+{
+ return RSS_HASH_KEY_LEN;
+}
+
+static int be_get_rxfh(struct net_device *netdev, u32 *indir, u8 *hkey)
+{
+ struct be_adapter *adapter = netdev_priv(netdev);
+ int i;
+ struct rss_info *rss = &adapter->rss_info;
+
+ if (indir) {
+ for (i = 0; i < RSS_INDIR_TABLE_LEN; i++)
+ indir[i] = rss->rss_queue[i];
+ }
+
+ if (hkey)
+ memcpy(hkey, rss->rss_hkey, RSS_HASH_KEY_LEN);
+
+ return 0;
+}
+
+static int be_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *hkey)
+{
+ int rc = 0, i, j;
+ struct be_adapter *adapter = netdev_priv(netdev);
+ u8 rsstable[RSS_INDIR_TABLE_LEN];
+
+ if (indir) {
+ struct be_rx_obj *rxo;
+ for (i = 0; i < RSS_INDIR_TABLE_LEN; i++) {
+ j = indir[i];
+ rxo = &adapter->rx_obj[j];
+ rsstable[i] = rxo->rss_id;
+ adapter->rss_info.rss_queue[i] = j;
+ }
+ } else {
+ memcpy(rsstable, adapter->rss_info.rsstable,
+ RSS_INDIR_TABLE_LEN);
+ }
+
+ if (!hkey)
+ hkey = adapter->rss_info.rss_hkey;
+
+ rc = be_cmd_rss_config(adapter, rsstable,
+ adapter->rss_info.rss_flags,
+ RSS_INDIR_TABLE_LEN, hkey);
+ if (rc) {
+ adapter->rss_info.rss_flags = RSS_ENABLE_NONE;
+ return -EIO;
+ }
+ memcpy(adapter->rss_info.rss_hkey, hkey, RSS_HASH_KEY_LEN);
+ memcpy(adapter->rss_info.rsstable, rsstable,
+ RSS_INDIR_TABLE_LEN);
+ return 0;
+}
+
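These four hooks wire the adapter's cached rss_info into ethtool's RETA/hash-key interface. The handlers are static, so the following is a hedged sketch of the call sequence the ethtool core performs, not literal driver code:

u32 indir[RSS_INDIR_TABLE_LEN];
u8 hkey[RSS_HASH_KEY_LEN];

be_get_rxfh(netdev, indir, hkey);	/* read cached table and key */
indir[0] = 1;				/* steer bucket 0 to queue 1 */
be_set_rxfh(netdev, indir, hkey);	/* reprogram HW, update cache */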
const struct ethtool_ops be_ethtool_ops = {
.get_settings = be_get_settings,
.get_drvinfo = be_get_drvinfo,
@@ -1129,6 +1179,10 @@ const struct ethtool_ops be_ethtool_ops = {
.self_test = be_self_test,
.get_rxnfc = be_get_rxnfc,
.set_rxnfc = be_set_rxnfc,
+ .get_rxfh_indir_size = be_get_rxfh_indir_size,
+ .get_rxfh_key_size = be_get_rxfh_key_size,
+ .get_rxfh = be_get_rxfh,
+ .set_rxfh = be_set_rxfh,
.get_channels = be_get_channels,
.set_channels = be_set_channels
};
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3bd1985..8840c64 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -188,10 +188,14 @@
#define OPTYPE_FCOE_FW_ACTIVE 10
#define OPTYPE_FCOE_FW_BACKUP 11
#define OPTYPE_NCSI_FW 13
+#define OPTYPE_REDBOOT_DIR 18
+#define OPTYPE_REDBOOT_CONFIG 19
+#define OPTYPE_SH_PHY_FW 21
+#define OPTYPE_FLASHISM_JUMPVECTOR 22
+#define OPTYPE_UFI_DIR 23
#define OPTYPE_PHY_FW 99
#define TN_8022 13
-#define ILLEGAL_IOCTL_REQ 2
#define FLASHROM_OPER_PHY_FLASH 9
#define FLASHROM_OPER_PHY_SAVE 10
#define FLASHROM_OPER_FLASH 1
@@ -250,6 +254,9 @@
#define IMAGE_FIRMWARE_BACKUP_FCoE 178
#define IMAGE_FIRMWARE_BACKUP_COMP_FCoE 179
#define IMAGE_FIRMWARE_PHY 192
+#define IMAGE_REDBOOT_DIR 208
+#define IMAGE_REDBOOT_CONFIG 209
+#define IMAGE_UFI_DIR 210
#define IMAGE_BOOT_CODE 224
/************* Rx Packet Type Encoding **************/
@@ -534,7 +541,8 @@ struct flash_section_entry {
u32 image_size;
u32 cksum;
u32 entry_point;
- u32 rsvd0;
+ u16 optype;
+ u16 rsvd0;
u32 rsvd1;
u8 ver_data[32];
} __packed;
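Each flash section entry now records its own operation type, which pairs with the new optype argument to be_cmd_get_flash_crc() (replacing the hard-coded OPTYPE_REDBOOT). A hypothetical caller, with fsec_entry and img_offset assumed:

/* fsec_entry: a struct flash_section_entry parsed from the UFI image;
 * endianness handling of the on-flash field is omitted for brevity.
 */
status = be_cmd_get_flash_crc(adapter, flashed_crc,
			      fsec_entry.optype, img_offset);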
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index dc19bc5..6822b3d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -134,7 +134,7 @@ static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
}
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
- u16 len, u16 entry_size)
+ u16 len, u16 entry_size)
{
struct be_dma_mem *mem = &q->dma_mem;
@@ -154,7 +154,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
u32 reg, enabled;
pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
- &reg);
+ &reg);
enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
if (!enabled && enable)
@@ -165,7 +165,7 @@ static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
return;
pci_write_config_dword(adapter->pdev,
- PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
+ PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}
static void be_intr_set(struct be_adapter *adapter, bool enable)
@@ -206,12 +206,11 @@ static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
}
static void be_eq_notify(struct be_adapter *adapter, u16 qid,
- bool arm, bool clear_int, u16 num_popped)
+ bool arm, bool clear_int, u16 num_popped)
{
u32 val = 0;
val |= qid & DB_EQ_RING_ID_MASK;
- val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
- DB_EQ_RING_ID_EXT_MASK_SHIFT);
+ val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
if (adapter->eeh_error)
return;
@@ -477,7 +476,7 @@ static void populate_be_v2_stats(struct be_adapter *adapter)
drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
- if (be_roce_supported(adapter)) {
+ if (be_roce_supported(adapter)) {
drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
drvs->rx_roce_frames = port_stats->roce_frames_received;
@@ -491,8 +490,7 @@ static void populate_lancer_stats(struct be_adapter *adapter)
{
struct be_drv_stats *drvs = &adapter->drv_stats;
- struct lancer_pport_stats *pport_stats =
- pport_stats_from_cmd(adapter);
+ struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
@@ -539,8 +537,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
}
static void populate_erx_stats(struct be_adapter *adapter,
- struct be_rx_obj *rxo,
- u32 erx_stat)
+ struct be_rx_obj *rxo, u32 erx_stat)
{
if (!BEx_chip(adapter))
rx_stats(rxo)->rx_drops_no_frags = erx_stat;
@@ -579,7 +576,7 @@ void be_parse_stats(struct be_adapter *adapter)
}
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
- struct rtnl_link_stats64 *stats)
+ struct rtnl_link_stats64 *stats)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_drv_stats *drvs = &adapter->drv_stats;
@@ -660,7 +657,8 @@ void be_link_status_update(struct be_adapter *adapter, u8 link_status)
}
static void be_tx_stats_update(struct be_tx_obj *txo,
- u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
+ u32 wrb_cnt, u32 copied, u32 gso_segs,
+ bool stopped)
{
struct be_tx_stats *stats = tx_stats(txo);
@@ -676,7 +674,7 @@ static void be_tx_stats_update(struct be_tx_obj *txo,
/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
- bool *dummy)
+ bool *dummy)
{
int cnt = (skb->len > skb->data_len);
@@ -704,7 +702,7 @@ static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
}
static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
- struct sk_buff *skb)
+ struct sk_buff *skb)
{
u8 vlan_prio;
u16 vlan_tag;
@@ -733,7 +731,8 @@ static u16 skb_ip_proto(struct sk_buff *skb)
}
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
- struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
+ struct sk_buff *skb, u32 wrb_cnt, u32 len,
+ bool skip_hw_vlan)
{
u16 vlan_tag, proto;
@@ -774,7 +773,7 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
}
static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
- bool unmap_single)
+ bool unmap_single)
{
dma_addr_t dma;
@@ -791,8 +790,8 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
}
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
- struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
- bool skip_hw_vlan)
+ struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
+ bool skip_hw_vlan)
{
dma_addr_t busaddr;
int i, copied = 0;
@@ -821,8 +820,7 @@ static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
}
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
- const struct skb_frag_struct *frag =
- &skb_shinfo(skb)->frags[i];
+ const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
busaddr = skb_frag_dma_map(dev, frag, 0,
skb_frag_size(frag), DMA_TO_DEVICE);
if (dma_mapping_error(dev, busaddr))
@@ -927,8 +925,7 @@ static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}
-static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
- struct sk_buff *skb)
+static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
@@ -959,7 +956,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
*/
if (be_pvid_tagging_enabled(adapter) &&
veh->h_vlan_proto == htons(ETH_P_8021Q))
- *skip_hw_vlan = true;
+ *skip_hw_vlan = true;
/* HW has a bug wherein it will calculate CSUM for VLAN
* pkts even though it is disabled.
@@ -1077,16 +1074,15 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
struct be_adapter *adapter = netdev_priv(netdev);
if (new_mtu < BE_MIN_MTU ||
- new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
- (ETH_HLEN + ETH_FCS_LEN))) {
+ new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
dev_info(&adapter->pdev->dev,
- "MTU must be between %d and %d bytes\n",
- BE_MIN_MTU,
- (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
+ "MTU must be between %d and %d bytes\n",
+ BE_MIN_MTU,
+ (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
return -EINVAL;
}
dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
- netdev->mtu, new_mtu);
+ netdev->mtu, new_mtu);
netdev->mtu = new_mtu;
return 0;
}
@@ -1098,7 +1094,7 @@ static int be_change_mtu(struct net_device *netdev, int new_mtu)
static int be_vid_config(struct be_adapter *adapter)
{
u16 vids[BE_NUM_VLANS_SUPPORTED];
- u16 num = 0, i;
+ u16 num = 0, i = 0;
int status = 0;
/* No need to further configure vids if in promiscuous mode */
@@ -1109,16 +1105,14 @@ static int be_vid_config(struct be_adapter *adapter)
goto set_vlan_promisc;
/* Construct VLAN Table to give to HW */
- for (i = 0; i < VLAN_N_VID; i++)
- if (adapter->vlan_tag[i])
- vids[num++] = cpu_to_le16(i);
-
- status = be_cmd_vlan_config(adapter, adapter->if_handle,
- vids, num, 0);
+ for_each_set_bit(i, adapter->vids, VLAN_N_VID)
+ vids[num++] = cpu_to_le16(i);
+ status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
if (status) {
/* Set to VLAN promisc mode as setting VLAN filter failed */
- if (status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
+ if (addl_status(status) ==
+ MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
goto set_vlan_promisc;
dev_err(&adapter->pdev->dev,
"Setting HW VLAN filtering failed.\n");
@@ -1160,16 +1154,16 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (lancer_chip(adapter) && vid == 0)
return status;
- if (adapter->vlan_tag[vid])
+ if (test_bit(vid, adapter->vids))
return status;
- adapter->vlan_tag[vid] = 1;
+ set_bit(vid, adapter->vids);
adapter->vlans_added++;
status = be_vid_config(adapter);
if (status) {
adapter->vlans_added--;
- adapter->vlan_tag[vid] = 0;
+ clear_bit(vid, adapter->vids);
}
return status;
@@ -1184,12 +1178,12 @@ static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
if (lancer_chip(adapter) && vid == 0)
goto ret;
- adapter->vlan_tag[vid] = 0;
+ clear_bit(vid, adapter->vids);
status = be_vid_config(adapter);
if (!status)
adapter->vlans_added--;
else
- adapter->vlan_tag[vid] = 1;
+ set_bit(vid, adapter->vids);
ret:
return status;
}
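
The vid bookkeeping above switches from a byte-per-VID array to a bitmap: set_bit/clear_bit/test_bit on adapter->vids, and for_each_set_bit() to build the table handed to firmware. A hedged plain-C model of the same idea (the kernel helpers are real; the open-coded loop below only stands in for them):

#include <limits.h>
#include <stdio.h>

#define VLAN_N_VID    4096
#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)
#define VID_WORDS     ((VLAN_N_VID + BITS_PER_LONG - 1) / BITS_PER_LONG)

static unsigned long vids[VID_WORDS];  /* 512 bytes instead of 4 KB */

static void set_vid(int vid)
{
        vids[vid / BITS_PER_LONG] |= 1UL << (vid % BITS_PER_LONG);
}

static void clear_vid(int vid)
{
        vids[vid / BITS_PER_LONG] &= ~(1UL << (vid % BITS_PER_LONG));
}

int main(void)
{
        int vid, num = 0;

        set_vid(100);
        set_vid(200);
        clear_vid(100);

        /* stand-in for: for_each_set_bit(i, adapter->vids, VLAN_N_VID) */
        for (vid = 0; vid < VLAN_N_VID; vid++)
                if (vids[vid / BITS_PER_LONG] & (1UL << (vid % BITS_PER_LONG)))
                        num++;

        printf("%d vid(s) set\n", num);        /* prints 1 */
        return 0;
}
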
@@ -1197,7 +1191,7 @@ ret:
static void be_clear_promisc(struct be_adapter *adapter)
{
adapter->promiscuous = false;
- adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+ adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
}
@@ -1222,10 +1216,8 @@ static void be_set_rx_mode(struct net_device *netdev)
/* Enable multicast promisc if num configured exceeds what we support */
if (netdev->flags & IFF_ALLMULTI ||
- netdev_mc_count(netdev) > be_max_mc(adapter)) {
- be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
- goto done;
- }
+ netdev_mc_count(netdev) > be_max_mc(adapter))
+ goto set_mcast_promisc;
if (netdev_uc_count(netdev) != adapter->uc_macs) {
struct netdev_hw_addr *ha;
@@ -1251,13 +1243,22 @@ static void be_set_rx_mode(struct net_device *netdev)
}
status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
-
- /* Set to MCAST promisc mode if setting MULTICAST address fails */
- if (status) {
- dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
- dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
- be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
+ if (!status) {
+ if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
+ adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
+ goto done;
}
+
+set_mcast_promisc:
+ if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
+ return;
+
+ /* Set to MCAST promisc mode if setting MULTICAST address fails
+ * or if num configured exceeds what we support
+ */
+ status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
+ if (!status)
+ adapter->flags |= BE_FLAGS_MCAST_PROMISC;
done:
return;
}
@@ -1287,7 +1288,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
if (status)
dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
- mac, vf);
+ mac, vf);
else
memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -1295,7 +1296,7 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
}
static int be_get_vf_config(struct net_device *netdev, int vf,
- struct ifla_vf_info *vi)
+ struct ifla_vf_info *vi)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1307,7 +1308,8 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
return -EINVAL;
vi->vf = vf;
- vi->tx_rate = vf_cfg->tx_rate;
+ vi->max_tx_rate = vf_cfg->tx_rate;
+ vi->min_tx_rate = 0;
vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
@@ -1316,8 +1318,7 @@ static int be_get_vf_config(struct net_device *netdev, int vf,
return 0;
}
-static int be_set_vf_vlan(struct net_device *netdev,
- int vf, u16 vlan, u8 qos)
+static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
{
struct be_adapter *adapter = netdev_priv(netdev);
struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
@@ -1348,11 +1349,14 @@ static int be_set_vf_vlan(struct net_device *netdev,
return status;
}
-static int be_set_vf_tx_rate(struct net_device *netdev,
- int vf, int rate)
+static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct be_adapter *adapter = netdev_priv(netdev);
- int status = 0;
+ struct device *dev = &adapter->pdev->dev;
+ int percent_rate, status = 0;
+ u16 link_speed = 0;
+ u8 link_status;
if (!sriov_enabled(adapter))
return -EPERM;
@@ -1360,18 +1364,50 @@ static int be_set_vf_tx_rate(struct net_device *netdev,
if (vf >= adapter->num_vfs)
return -EINVAL;
- if (rate < 100 || rate > 10000) {
- dev_err(&adapter->pdev->dev,
- "tx rate must be between 100 and 10000 Mbps\n");
+ if (min_tx_rate)
return -EINVAL;
+
+ if (!max_tx_rate)
+ goto config_qos;
+
+ status = be_cmd_link_status_query(adapter, &link_speed,
+ &link_status, 0);
+ if (status)
+ goto err;
+
+ if (!link_status) {
+ dev_err(dev, "TX-rate setting not allowed when link is down\n");
+ status = -EPERM;
+ goto err;
+ }
+
+ if (max_tx_rate < 100 || max_tx_rate > link_speed) {
+ dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
+ link_speed);
+ status = -EINVAL;
+ goto err;
+ }
+
+ /* On Skyhawk the QOS setting must be done only as a % value */
+ percent_rate = link_speed / 100;
+ if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
+ dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
+ percent_rate);
+ status = -EINVAL;
+ goto err;
}
- status = be_cmd_config_qos(adapter, rate / 10, vf + 1);
+config_qos:
+ status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
if (status)
- dev_err(&adapter->pdev->dev,
- "tx rate %d on VF %d failed\n", rate, vf);
- else
- adapter->vf_cfg[vf].tx_rate = rate;
+ goto err;
+
+ adapter->vf_cfg[vf].tx_rate = max_tx_rate;
+ return 0;
+
+err:
+ dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
+ max_tx_rate, vf);
return status;
}
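
The new validation deserves a worked example, since the Skyhawk constraint is arithmetic: QoS is programmed as a percentage of link speed, so max_tx_rate must be a whole multiple of link_speed/100. A hedged standalone sketch of just that check (error codes collapsed to -1):

#include <stdio.h>

static int validate_vf_rate(int max_tx_rate, int link_speed, int is_skyhawk)
{
        int percent_rate = link_speed / 100;

        if (max_tx_rate < 100 || max_tx_rate > link_speed)
                return -1;             /* out of range */
        if (is_skyhawk && (max_tx_rate % percent_rate))
                return -1;             /* not a whole percent of link speed */
        return 0;
}

int main(void)
{
        /* 10G link: 1 percent = 100 Mbps, so 2500 Mbps is valid... */
        printf("%d\n", validate_vf_rate(2500, 10000, 1));  /* 0  */
        /* ...but 2550 Mbps is not a multiple of 100 Mbps. */
        printf("%d\n", validate_vf_rate(2550, 10000, 1));  /* -1 */
        return 0;
}
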
static int be_set_vf_link_state(struct net_device *netdev, int vf,
@@ -1469,7 +1505,7 @@ modify_eqd:
}
static void be_rx_stats_update(struct be_rx_obj *rxo,
- struct be_rx_compl_info *rxcp)
+ struct be_rx_compl_info *rxcp)
{
struct be_rx_stats *stats = rx_stats(rxo);
@@ -1566,7 +1602,8 @@ static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
skb_frag_set_page(skb, 0, page_info->page);
skb_shinfo(skb)->frags[0].page_offset =
page_info->page_offset + hdr_len;
- skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
+ skb_frag_size_set(&skb_shinfo(skb)->frags[0],
+ curr_frag_len - hdr_len);
skb->data_len = curr_frag_len - hdr_len;
skb->truesize += rx_frag_size;
skb->tail += hdr_len;
@@ -1725,8 +1762,8 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
if (rxcp->vlanf) {
rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
- compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
+ vlan_tag, compl);
}
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
rxcp->tunneled =
@@ -1757,8 +1794,8 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
if (rxcp->vlanf) {
rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
compl);
- rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
- compl);
+ rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
+ vlan_tag, compl);
}
rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
@@ -1799,7 +1836,7 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
rxcp->vlan_tag = swab16(rxcp->vlan_tag);
if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
- !adapter->vlan_tag[rxcp->vlan_tag])
+ !test_bit(rxcp->vlan_tag, adapter->vids))
rxcp->vlanf = 0;
}
@@ -1915,7 +1952,7 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
}
static u16 be_tx_compl_process(struct be_adapter *adapter,
- struct be_tx_obj *txo, u16 last_index)
+ struct be_tx_obj *txo, u16 last_index)
{
struct be_queue_info *txq = &txo->q;
struct be_eth_wrb *wrb;
@@ -2122,7 +2159,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
eq = &eqo->q;
rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
- sizeof(struct be_eq_entry));
+ sizeof(struct be_eq_entry));
if (rc)
return rc;
@@ -2155,7 +2192,7 @@ static int be_mcc_queues_create(struct be_adapter *adapter)
cq = &adapter->mcc_obj.cq;
if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
- sizeof(struct be_mcc_compl)))
+ sizeof(struct be_mcc_compl)))
goto err;
/* Use the default EQ for MCC completions */
@@ -2275,7 +2312,7 @@ static int be_rx_cqs_create(struct be_adapter *adapter)
rxo->adapter = adapter;
cq = &rxo->cq;
rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
- sizeof(struct be_eth_rx_compl));
+ sizeof(struct be_eth_rx_compl));
if (rc)
return rc;
@@ -2339,7 +2376,7 @@ static inline bool do_gro(struct be_rx_compl_info *rxcp)
}
static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
- int budget, int polling)
+ int budget, int polling)
{
struct be_adapter *adapter = rxo->adapter;
struct be_queue_info *rx_cq = &rxo->cq;
@@ -2365,7 +2402,7 @@ static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
* promiscuous mode on some SKUs
*/
if (unlikely(rxcp->port != adapter->port_num &&
- !lancer_chip(adapter))) {
+ !lancer_chip(adapter))) {
be_rx_compl_discard(rxo, rxcp);
goto loop_continue;
}
@@ -2405,8 +2442,9 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
if (!txcp)
break;
num_wrbs += be_tx_compl_process(adapter, txo,
- AMAP_GET_BITS(struct amap_eth_tx_compl,
- wrb_index, txcp));
+ AMAP_GET_BITS(struct
+ amap_eth_tx_compl,
+ wrb_index, txcp));
}
if (work_done) {
@@ -2416,7 +2454,7 @@ static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
/* As Tx wrbs have been freed up, wake up netdev queue
* if it was stopped due to lack of tx wrbs. */
if (__netif_subqueue_stopped(adapter->netdev, idx) &&
- atomic_read(&txo->q.used) < txo->q.len / 2) {
+ atomic_read(&txo->q.used) < txo->q.len / 2) {
netif_wake_subqueue(adapter->netdev, idx);
}
@@ -2510,9 +2548,9 @@ void be_detect_error(struct be_adapter *adapter)
sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
sliport_err1 = ioread32(adapter->db +
- SLIPORT_ERROR1_OFFSET);
+ SLIPORT_ERROR1_OFFSET);
sliport_err2 = ioread32(adapter->db +
- SLIPORT_ERROR2_OFFSET);
+ SLIPORT_ERROR2_OFFSET);
adapter->hw_error = true;
/* Do not log error messages if it's a FW reset */
if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
@@ -2531,13 +2569,13 @@ void be_detect_error(struct be_adapter *adapter)
}
} else {
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW, &ue_lo);
+ PCICFG_UE_STATUS_LOW, &ue_lo);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HIGH, &ue_hi);
+ PCICFG_UE_STATUS_HIGH, &ue_hi);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
+ PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
pci_read_config_dword(adapter->pdev,
- PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
+ PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
@@ -2624,7 +2662,7 @@ fail:
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
- struct be_eq_obj *eqo)
+ struct be_eq_obj *eqo)
{
return adapter->msix_entries[eqo->msix_idx].vector;
}
@@ -2648,7 +2686,7 @@ err_msix:
for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
free_irq(be_msix_vec_get(adapter, eqo), eqo);
dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
- status);
+ status);
be_msix_disable(adapter);
return status;
}
@@ -2774,7 +2812,8 @@ static int be_rx_qs_create(struct be_adapter *adapter)
{
struct be_rx_obj *rxo;
int rc, i, j;
- u8 rsstable[128];
+ u8 rss_hkey[RSS_HASH_KEY_LEN];
+ struct rss_info *rss = &adapter->rss_info;
for_all_rx_queues(adapter, rxo, i) {
rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
@@ -2799,31 +2838,36 @@ static int be_rx_qs_create(struct be_adapter *adapter)
}
if (be_multi_rxq(adapter)) {
- for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
+ for (j = 0; j < RSS_INDIR_TABLE_LEN;
+ j += adapter->num_rx_qs - 1) {
for_all_rss_queues(adapter, rxo, i) {
- if ((j + i) >= 128)
+ if ((j + i) >= RSS_INDIR_TABLE_LEN)
break;
- rsstable[j + i] = rxo->rss_id;
+ rss->rsstable[j + i] = rxo->rss_id;
+ rss->rss_queue[j + i] = i;
}
}
- adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
- RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
+ rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
+ RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
if (!BEx_chip(adapter))
- adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
- RSS_ENABLE_UDP_IPV6;
+ rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
+ RSS_ENABLE_UDP_IPV6;
} else {
/* Disable RSS, if only default RX Q is created */
- adapter->rss_flags = RSS_ENABLE_NONE;
+ rss->rss_flags = RSS_ENABLE_NONE;
}
- rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
- 128);
+ get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
+ rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
+ 128, rss_hkey);
if (rc) {
- adapter->rss_flags = RSS_ENABLE_NONE;
+ rss->rss_flags = RSS_ENABLE_NONE;
return rc;
}
+ memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
+
/* First time posting */
for_all_rx_queues(adapter, rxo, i)
be_post_rx_frags(rxo, GFP_KERNEL);
@@ -2896,7 +2940,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
if (enable) {
status = pci_write_config_dword(adapter->pdev,
- PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
+ PCICFG_PM_CONTROL_OFFSET,
+ PCICFG_PM_CONTROL_MASK);
if (status) {
dev_err(&adapter->pdev->dev,
"Could not enable Wake-on-lan\n");
@@ -2905,7 +2950,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
return status;
}
status = be_cmd_enable_magic_wol(adapter,
- adapter->netdev->dev_addr, &cmd);
+ adapter->netdev->dev_addr,
+ &cmd);
pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
} else {
@@ -2944,7 +2990,8 @@ static int be_vf_eth_addr_config(struct be_adapter *adapter)
if (status)
dev_err(&adapter->pdev->dev,
- "Mac address assignment failed for VF %d\n", vf);
+ "Mac address assignment failed for VF %d\n",
+ vf);
else
memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
@@ -3086,9 +3133,11 @@ static int be_vfs_if_create(struct be_adapter *adapter)
/* If a FW profile exists, then cap_flags are updated */
en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
- BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
- status = be_cmd_if_create(adapter, cap_flags, en_flags,
- &vf_cfg->if_handle, vf + 1);
+ BE_IF_FLAGS_BROADCAST |
+ BE_IF_FLAGS_MULTICAST);
+ status =
+ be_cmd_if_create(adapter, cap_flags, en_flags,
+ &vf_cfg->if_handle, vf + 1);
if (status)
goto err;
}
@@ -3119,7 +3168,6 @@ static int be_vf_setup(struct be_adapter *adapter)
struct be_vf_cfg *vf_cfg;
int status, old_vfs, vf;
u32 privileges;
- u16 lnk_speed;
old_vfs = pci_num_vf(adapter->pdev);
if (old_vfs) {
@@ -3175,16 +3223,9 @@ static int be_vf_setup(struct be_adapter *adapter)
vf);
}
- /* BE3 FW, by default, caps VF TX-rate to 100mbps.
- * Allow full available bandwidth
- */
- if (BE3_chip(adapter) && !old_vfs)
- be_cmd_config_qos(adapter, 1000, vf + 1);
-
- status = be_cmd_link_status_query(adapter, &lnk_speed,
- NULL, vf + 1);
- if (!status)
- vf_cfg->tx_rate = lnk_speed;
+ /* Allow full available bandwidth */
+ if (!old_vfs)
+ be_cmd_config_qos(adapter, 0, 0, vf + 1);
if (!old_vfs) {
be_cmd_enable_vf(adapter, vf + 1);
@@ -3590,35 +3631,7 @@ static void be_netpoll(struct net_device *netdev)
}
#endif
-#define FW_FILE_HDR_SIGN "ServerEngines Corp. "
-static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
-
-static bool be_flash_redboot(struct be_adapter *adapter,
- const u8 *p, u32 img_start, int image_size,
- int hdr_size)
-{
- u32 crc_offset;
- u8 flashed_crc[4];
- int status;
-
- crc_offset = hdr_size + img_start + image_size - 4;
-
- p += crc_offset;
-
- status = be_cmd_get_flash_crc(adapter, flashed_crc,
- (image_size - 4));
- if (status) {
- dev_err(&adapter->pdev->dev,
- "could not get crc from flash, not flashing redboot\n");
- return false;
- }
-
- /*update redboot only if crc does not match*/
- if (!memcmp(flashed_crc, p, 4))
- return false;
- else
- return true;
-}
+static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
static bool phy_flashing_required(struct be_adapter *adapter)
{
@@ -3649,8 +3662,8 @@ static bool is_comp_in_ufi(struct be_adapter *adapter,
}
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
- int header_size,
- const struct firmware *fw)
+ int header_size,
+ const struct firmware *fw)
{
struct flash_section_info *fsec = NULL;
const u8 *p = fw->data;
@@ -3665,12 +3678,35 @@ static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
return NULL;
}
+static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
+ u32 img_offset, u32 img_size, int hdr_size,
+ u16 img_optype, bool *crc_match)
+{
+ u32 crc_offset;
+ int status;
+ u8 crc[4];
+
+ status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
+ if (status)
+ return status;
+
+ crc_offset = hdr_size + img_offset + img_size - 4;
+
+ /* Skip flashing, if crc of flashed region matches */
+ if (!memcmp(crc, p + crc_offset, 4))
+ *crc_match = true;
+ else
+ *crc_match = false;
+
+ return status;
+}
+
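
be_check_flash_crc() generalizes the old redboot-only test: every UFI region ends in a 4-byte CRC, and if the CRC the card already stores for that region (fetched with be_cmd_get_flash_crc()) matches the one in the new image, the region is unchanged and flashing it is skipped. A hedged sketch of the comparison itself, with the firmware query replaced by a plain buffer:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static bool region_needs_flash(const uint8_t *img, size_t img_size,
                               const uint8_t flashed_crc[4])
{
        /* the last 4 bytes of the image region hold its CRC */
        return memcmp(img + img_size - 4, flashed_crc, 4) != 0;
}

int main(void)
{
        uint8_t img[16]     = { [12] = 0xde, 0xad, 0xbe, 0xef };
        uint8_t on_card[4]  = { 0xde, 0xad, 0xbe, 0xef };

        printf(region_needs_flash(img, sizeof(img), on_card) ?
               "flash\n" : "skip\n");  /* prints "skip" */
        return 0;
}
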
static int be_flash(struct be_adapter *adapter, const u8 *img,
- struct be_dma_mem *flash_cmd, int optype, int img_size)
+ struct be_dma_mem *flash_cmd, int optype, int img_size)
{
- u32 total_bytes = 0, flash_op, num_bytes = 0;
- int status = 0;
struct be_cmd_write_flashrom *req = flash_cmd->va;
+ u32 total_bytes, flash_op, num_bytes;
+ int status;
total_bytes = img_size;
while (total_bytes) {
@@ -3693,32 +3729,28 @@ static int be_flash(struct be_adapter *adapter, const u8 *img,
memcpy(req->data_buf, img, num_bytes);
img += num_bytes;
status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
- flash_op, num_bytes);
- if (status) {
- if (status == ILLEGAL_IOCTL_REQ &&
- optype == OPTYPE_PHY_FW)
- break;
- dev_err(&adapter->pdev->dev,
- "cmd to write to flash rom failed.\n");
+ flash_op, num_bytes);
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
+ optype == OPTYPE_PHY_FW)
+ break;
+ else if (status)
return status;
- }
}
return 0;
}
/* For BE2, BE3 and BE3-R */
static int be_flash_BEx(struct be_adapter *adapter,
- const struct firmware *fw,
- struct be_dma_mem *flash_cmd,
- int num_of_images)
-
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
{
- int status = 0, i, filehdr_size = 0;
int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
- const u8 *p = fw->data;
- const struct flash_comp *pflashcomp;
- int num_comp, redboot;
+ struct device *dev = &adapter->pdev->dev;
struct flash_section_info *fsec = NULL;
+ int status, i, filehdr_size, num_comp;
+ const struct flash_comp *pflashcomp;
+ bool crc_match;
+ const u8 *p;
struct flash_comp gen3_flash_types[] = {
{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
@@ -3775,8 +3807,7 @@ static int be_flash_BEx(struct be_adapter *adapter,
/* Get flash section info*/
fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
if (!fsec) {
- dev_err(&adapter->pdev->dev,
- "Invalid Cookie. UFI corrupted ?\n");
+ dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
return -1;
}
for (i = 0; i < num_comp; i++) {
@@ -3792,23 +3823,32 @@ static int be_flash_BEx(struct be_adapter *adapter,
continue;
if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
- redboot = be_flash_redboot(adapter, fw->data,
- pflashcomp[i].offset, pflashcomp[i].size,
- filehdr_size + img_hdrs_size);
- if (!redboot)
+ status = be_check_flash_crc(adapter, fw->data,
+ pflashcomp[i].offset,
+ pflashcomp[i].size,
+ filehdr_size +
+ img_hdrs_size,
+ OPTYPE_REDBOOT, &crc_match);
+ if (status) {
+ dev_err(dev,
+ "Could not get CRC for 0x%x region\n",
+ pflashcomp[i].optype);
+ continue;
+ }
+
+ if (crc_match)
continue;
}
- p = fw->data;
- p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
+ p = fw->data + filehdr_size + pflashcomp[i].offset +
+ img_hdrs_size;
if (p + pflashcomp[i].size > fw->data + fw->size)
return -1;
status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
- pflashcomp[i].size);
+ pflashcomp[i].size);
if (status) {
- dev_err(&adapter->pdev->dev,
- "Flashing section type %d failed.\n",
+ dev_err(dev, "Flashing section type 0x%x failed\n",
pflashcomp[i].img_type);
return status;
}
@@ -3816,80 +3856,142 @@ static int be_flash_BEx(struct be_adapter *adapter,
return 0;
}
+static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
+{
+ u32 img_type = le32_to_cpu(fsec_entry.type);
+ u16 img_optype = le16_to_cpu(fsec_entry.optype);
+
+ if (img_optype != 0xFFFF)
+ return img_optype;
+
+ switch (img_type) {
+ case IMAGE_FIRMWARE_iSCSI:
+ img_optype = OPTYPE_ISCSI_ACTIVE;
+ break;
+ case IMAGE_BOOT_CODE:
+ img_optype = OPTYPE_REDBOOT;
+ break;
+ case IMAGE_OPTION_ROM_ISCSI:
+ img_optype = OPTYPE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_PXE:
+ img_optype = OPTYPE_PXE_BIOS;
+ break;
+ case IMAGE_OPTION_ROM_FCoE:
+ img_optype = OPTYPE_FCOE_BIOS;
+ break;
+ case IMAGE_FIRMWARE_BACKUP_iSCSI:
+ img_optype = OPTYPE_ISCSI_BACKUP;
+ break;
+ case IMAGE_NCSI:
+ img_optype = OPTYPE_NCSI_FW;
+ break;
+ case IMAGE_FLASHISM_JUMPVECTOR:
+ img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
+ break;
+ case IMAGE_FIRMWARE_PHY:
+ img_optype = OPTYPE_SH_PHY_FW;
+ break;
+ case IMAGE_REDBOOT_DIR:
+ img_optype = OPTYPE_REDBOOT_DIR;
+ break;
+ case IMAGE_REDBOOT_CONFIG:
+ img_optype = OPTYPE_REDBOOT_CONFIG;
+ break;
+ case IMAGE_UFI_DIR:
+ img_optype = OPTYPE_UFI_DIR;
+ break;
+ default:
+ break;
+ }
+
+ return img_optype;
+}
+
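
be_get_img_optype() encodes a compatibility rule: new-format UFIs carry the flash op_type in each section entry, while 0xFFFF marks an old image whose op_type must be derived from the image type. A hedged reduction of that rule (the constants below are illustrative, except IMAGE_BOOT_CODE, which the header diff defines as 224):

#include <stdint.h>
#include <stdio.h>

#define OPTYPE_ABSENT   0xFFFF  /* old-format entry: field unused */
#define IMAGE_BOOT_CODE 224     /* from be_hw.h above */
#define OPTYPE_REDBOOT  2       /* illustrative value */

static uint16_t resolve_optype(uint16_t entry_optype, uint32_t img_type)
{
        if (entry_optype != OPTYPE_ABSENT)
                return entry_optype;           /* new format: trust it */

        switch (img_type) {                    /* legacy fallback */
        case IMAGE_BOOT_CODE:
                return OPTYPE_REDBOOT;
        default:
                return OPTYPE_ABSENT;          /* unknown: caller skips */
        }
}

int main(void)
{
        printf("%u\n", resolve_optype(21, 0));                   /* 21 */
        printf("%u\n", resolve_optype(0xFFFF, IMAGE_BOOT_CODE)); /* 2  */
        return 0;
}
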
static int be_flash_skyhawk(struct be_adapter *adapter,
- const struct firmware *fw,
- struct be_dma_mem *flash_cmd, int num_of_images)
+ const struct firmware *fw,
+ struct be_dma_mem *flash_cmd, int num_of_images)
{
- int status = 0, i, filehdr_size = 0;
- int img_offset, img_size, img_optype, redboot;
int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
- const u8 *p = fw->data;
+ struct device *dev = &adapter->pdev->dev;
struct flash_section_info *fsec = NULL;
+ u32 img_offset, img_size, img_type;
+ int status, i, filehdr_size;
+ bool crc_match, old_fw_img;
+ u16 img_optype;
+ const u8 *p;
filehdr_size = sizeof(struct flash_file_hdr_g3);
fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
if (!fsec) {
- dev_err(&adapter->pdev->dev,
- "Invalid Cookie. UFI corrupted ?\n");
+ dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
return -1;
}
for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
img_size = le32_to_cpu(fsec->fsec_entry[i].pad_size);
+ img_type = le32_to_cpu(fsec->fsec_entry[i].type);
+ img_optype = be_get_img_optype(fsec->fsec_entry[i]);
+ old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
- switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
- case IMAGE_FIRMWARE_iSCSI:
- img_optype = OPTYPE_ISCSI_ACTIVE;
- break;
- case IMAGE_BOOT_CODE:
- img_optype = OPTYPE_REDBOOT;
- break;
- case IMAGE_OPTION_ROM_ISCSI:
- img_optype = OPTYPE_BIOS;
- break;
- case IMAGE_OPTION_ROM_PXE:
- img_optype = OPTYPE_PXE_BIOS;
- break;
- case IMAGE_OPTION_ROM_FCoE:
- img_optype = OPTYPE_FCOE_BIOS;
- break;
- case IMAGE_FIRMWARE_BACKUP_iSCSI:
- img_optype = OPTYPE_ISCSI_BACKUP;
- break;
- case IMAGE_NCSI:
- img_optype = OPTYPE_NCSI_FW;
- break;
- default:
+ if (img_optype == 0xFFFF)
continue;
+ /* Don't bother verifying CRC if an old FW image is being
+ * flashed
+ */
+ if (old_fw_img)
+ goto flash;
+
+ status = be_check_flash_crc(adapter, fw->data, img_offset,
+ img_size, filehdr_size +
+ img_hdrs_size, img_optype,
+ &crc_match);
+ /* The current FW image on the card does not recognize the new
+ * FLASH op_type. The FW download is partially complete.
+ * Reboot the server now to enable FW image to recognize the
+ * new FLASH op_type. To complete the remaining process,
+ * download the same FW again after the reboot.
+ */
+ if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
+ base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
+ dev_err(dev, "Flash incomplete. Reset the server\n");
+ dev_err(dev, "Download FW image again after reset\n");
+ return -EAGAIN;
+ } else if (status) {
+ dev_err(dev, "Could not get CRC for 0x%x region\n",
+ img_optype);
+ return -EFAULT;
}
- if (img_optype == OPTYPE_REDBOOT) {
- redboot = be_flash_redboot(adapter, fw->data,
- img_offset, img_size,
- filehdr_size + img_hdrs_size);
- if (!redboot)
- continue;
- }
+ if (crc_match)
+ continue;
- p = fw->data;
- p += filehdr_size + img_offset + img_hdrs_size;
+flash:
+ p = fw->data + filehdr_size + img_offset + img_hdrs_size;
if (p + img_size > fw->data + fw->size)
return -1;
status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
- if (status) {
- dev_err(&adapter->pdev->dev,
- "Flashing section type %d failed.\n",
- fsec->fsec_entry[i].type);
- return status;
+ /* For old FW images ignore ILLEGAL_FIELD error or errors on
+ * UFI_DIR region
+ */
+ if (old_fw_img &&
+ (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
+ (img_optype == OPTYPE_UFI_DIR &&
+ base_status(status) == MCC_STATUS_FAILED))) {
+ continue;
+ } else if (status) {
+ dev_err(dev, "Flashing section type 0x%x failed\n",
+ img_type);
+ return -EFAULT;
}
}
return 0;
}
static int lancer_fw_download(struct be_adapter *adapter,
- const struct firmware *fw)
+ const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION "/prg"
@@ -3955,7 +4057,7 @@ static int lancer_fw_download(struct be_adapter *adapter,
}
dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
- flash_cmd.dma);
+ flash_cmd.dma);
if (status) {
dev_err(&adapter->pdev->dev,
"Firmware load error. "
@@ -3976,9 +4078,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
goto lancer_fw_exit;
}
} else if (change_status != LANCER_NO_RESET_NEEDED) {
- dev_err(&adapter->pdev->dev,
- "System reboot required for new FW"
- " to be active\n");
+ dev_err(&adapter->pdev->dev,
+ "System reboot required for new FW to be active\n");
}
dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
@@ -4042,7 +4143,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
switch (ufi_type) {
case UFI_TYPE4:
status = be_flash_skyhawk(adapter, fw,
- &flash_cmd, num_imgs);
+ &flash_cmd, num_imgs);
break;
case UFI_TYPE3R:
status = be_flash_BEx(adapter, fw, &flash_cmd,
@@ -4112,8 +4213,7 @@ fw_exit:
return status;
}
-static int be_ndo_bridge_setlink(struct net_device *dev,
- struct nlmsghdr *nlh)
+static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
{
struct be_adapter *adapter = netdev_priv(dev);
struct nlattr *attr, *br_spec;
@@ -4155,8 +4255,7 @@ err:
}
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev,
- u32 filter_mask)
+ struct net_device *dev, u32 filter_mask)
{
struct be_adapter *adapter = netdev_priv(dev);
int status = 0;
@@ -4254,7 +4353,7 @@ static const struct net_device_ops be_netdev_ops = {
.ndo_vlan_rx_kill_vid = be_vlan_rem_vid,
.ndo_set_vf_mac = be_set_vf_mac,
.ndo_set_vf_vlan = be_set_vf_vlan,
- .ndo_set_vf_tx_rate = be_set_vf_tx_rate,
+ .ndo_set_vf_rate = be_set_vf_tx_rate,
.ndo_get_vf_config = be_get_vf_config,
.ndo_set_vf_link_state = be_set_vf_link_state,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -4301,7 +4400,7 @@ static void be_netdev_init(struct net_device *netdev)
netdev->netdev_ops = &be_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
+ netdev->ethtool_ops = &be_ethtool_ops;
}
static void be_unmap_pci_bars(struct be_adapter *adapter)
@@ -4870,7 +4969,7 @@ static void be_shutdown(struct pci_dev *pdev)
}
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
- pci_channel_state_t state)
+ pci_channel_state_t state)
{
struct be_adapter *adapter = pci_get_drvdata(pdev);
struct net_device *netdev = adapter->netdev;
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 8b70ca7..f3658bd 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -769,11 +769,6 @@ static int ethoc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return phy_mii_ioctl(phy, ifr, cmd);
}
-static int ethoc_config(struct net_device *dev, struct ifmap *map)
-{
- return -ENOSYS;
-}
-
static void ethoc_do_set_mac_address(struct net_device *dev)
{
struct ethoc *priv = netdev_priv(dev);
@@ -995,7 +990,6 @@ static const struct net_device_ops ethoc_netdev_ops = {
.ndo_open = ethoc_open,
.ndo_stop = ethoc_stop,
.ndo_do_ioctl = ethoc_ioctl,
- .ndo_set_config = ethoc_config,
.ndo_set_mac_address = ethoc_set_mac_address,
.ndo_set_rx_mode = ethoc_set_multicast_list,
.ndo_change_mtu = ethoc_change_mtu,
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 68069ea..c77fa4a 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1210,7 +1210,7 @@ static int ftgmac100_probe(struct platform_device *pdev)
SET_NETDEV_DEV(netdev, &pdev->dev);
- SET_ETHTOOL_OPS(netdev, &ftgmac100_ethtool_ops);
+ netdev->ethtool_ops = &ftgmac100_ethtool_ops;
netdev->netdev_ops = &ftgmac100_netdev_ops;
netdev->features = NETIF_F_IP_CSUM | NETIF_F_GRO;
diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
index 8be5b40..4ff1adc 100644
--- a/drivers/net/ethernet/faraday/ftmac100.c
+++ b/drivers/net/ethernet/faraday/ftmac100.c
@@ -1085,7 +1085,7 @@ static int ftmac100_probe(struct platform_device *pdev)
}
SET_NETDEV_DEV(netdev, &pdev->dev);
- SET_ETHTOOL_OPS(netdev, &ftmac100_ethtool_ops);
+ netdev->ethtool_ops = &ftmac100_ethtool_ops;
netdev->netdev_ops = &ftmac100_netdev_ops;
platform_set_drvdata(pdev, netdev);
diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig
index 6048dc8..2703083 100644
--- a/drivers/net/ethernet/freescale/Kconfig
+++ b/drivers/net/ethernet/freescale/Kconfig
@@ -67,6 +67,7 @@ config FSL_XGMAC_MDIO
tristate "Freescale XGMAC MDIO"
depends on FSL_SOC
select PHYLIB
+ select OF_MDIO
---help---
This driver supports the MDIO bus on the Fman 10G Ethernet MACs.
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 3b8d6d1..671d080 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -221,7 +221,7 @@ struct bufdesc_ex {
#define BD_ENET_TX_RCMASK ((ushort)0x003c)
#define BD_ENET_TX_UN ((ushort)0x0002)
#define BD_ENET_TX_CSL ((ushort)0x0001)
-#define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */
+#define BD_ENET_TX_STATS ((ushort)0x0fff) /* All status bits */
/*enhanced buffer descriptor control/status used by Ethernet transmit*/
#define BD_ENET_TX_INT 0x40000000
@@ -246,8 +246,8 @@ struct bufdesc_ex {
#define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
#define FEC_ENET_TX_FRSIZE 2048
#define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE)
-#define TX_RING_SIZE 16 /* Must be power of two */
-#define TX_RING_MOD_MASK 15 /* for this to work */
+#define TX_RING_SIZE 512 /* Must be power of two */
+#define TX_RING_MOD_MASK 511 /* for this to work */
#define BD_ENET_RX_INT 0x00800000
#define BD_ENET_RX_PTP ((ushort)0x0400)
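
The TX ring growth from 16 to 512 keeps the power-of-two rule stated in the comment; the mask must stay size-1 so that a cheap AND substitutes for modulo when wrapping indices. A hedged self-check of that identity:

#include <assert.h>
#include <stdio.h>

#define TX_RING_SIZE     512   /* must be a power of two */
#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)

int main(void)
{
        unsigned int i;

        _Static_assert((TX_RING_SIZE & (TX_RING_SIZE - 1)) == 0,
                       "ring size must be a power of two");

        for (i = 0; i < 4 * TX_RING_SIZE; i++)
                assert((i & TX_RING_MOD_MASK) == i % TX_RING_SIZE);

        puts("mask == modulo for power-of-two rings");
        return 0;
}
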
@@ -296,8 +296,15 @@ struct fec_enet_private {
/* The ring entries to be free()ed */
struct bufdesc *dirty_tx;
+ unsigned short bufdesc_size;
unsigned short tx_ring_size;
unsigned short rx_ring_size;
+ unsigned short tx_stop_threshold;
+ unsigned short tx_wake_threshold;
+
+ /* Software TSO */
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_dma;
struct platform_device *pdev;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 8d69e43..38d9d27 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -36,6 +36,7 @@
#include <linux/in.h>
#include <linux/ip.h>
#include <net/ip.h>
+#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/icmp.h>
@@ -54,6 +55,7 @@
#include <linux/of_net.h>
#include <linux/regulator/consumer.h>
#include <linux/if_vlan.h>
+#include <linux/pinctrl/consumer.h>
#include <asm/cacheflush.h>
@@ -172,10 +174,6 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#endif
#endif /* CONFIG_M5272 */
-#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE)
-#error "FEC: descriptor ring size constants too large"
-#endif
-
/* Interrupt events/masks. */
#define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */
#define FEC_ENET_BABR ((uint)0x40000000) /* Babbling receiver */
@@ -231,6 +229,15 @@ MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
#define FEC_PAUSE_FLAG_AUTONEG 0x1
#define FEC_PAUSE_FLAG_ENABLE 0x2
+#define TSO_HEADER_SIZE 128
+/* Max number of allowed TCP segments for software TSO */
+#define FEC_MAX_TSO_SEGS 100
+#define FEC_MAX_SKB_DESCS (FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+#define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_dma) && \
+ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
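
IS_TSO_HEADER() is a plain address-range test: the rebuilt TSO headers live in one coherent DMA block (tso_hdrs_dma), so the completion path must not dma_unmap_single() them, while every other buffer was streaming-mapped and must be unmapped. A hedged userspace model of the test:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define TSO_HDR_SZ 128
#define RING_SIZE  512

static bool is_tso_header(uintptr_t addr, uintptr_t hdrs_base)
{
        return addr >= hdrs_base &&
               addr < hdrs_base + (uintptr_t)RING_SIZE * TSO_HDR_SZ;
}

int main(void)
{
        uintptr_t base = 0x10000;

        printf("%d\n", is_tso_header(base + 4 * TSO_HDR_SZ, base)); /* 1 */
        printf("%d\n", is_tso_header(base - 4, base));              /* 0 */
        return 0;
}
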
static int mii_cnt;
static inline
@@ -286,6 +293,22 @@ struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_priva
return (new_bd < base) ? (new_bd + ring_size) : new_bd;
}
+static int fec_enet_get_bd_index(struct bufdesc *base, struct bufdesc *bdp,
+ struct fec_enet_private *fep)
+{
+ return ((const char *)bdp - (const char *)base) / fep->bufdesc_size;
+}
+
+static int fec_enet_get_free_txdesc_num(struct fec_enet_private *fep)
+{
+ int entries;
+
+ entries = ((const char *)fep->dirty_tx -
+ (const char *)fep->cur_tx) / fep->bufdesc_size - 1;
+
+ return entries > 0 ? entries : entries + fep->tx_ring_size;
+}
+
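
fec_enet_get_free_txdesc_num() is pointer arithmetic on a circular ring: cur_tx chases dirty_tx, and one descriptor stays in reserve so that full and empty are distinguishable. A hedged index-based model of the same formula:

#include <stdio.h>

/* dirty_tx = next descriptor to clean, cur_tx = next to fill;
 * mirrors the driver's formula with indices instead of pointers. */
static int free_txdesc(int dirty_tx, int cur_tx, int ring_size)
{
        int entries = dirty_tx - cur_tx - 1;

        return entries > 0 ? entries : entries + ring_size;
}

int main(void)
{
        printf("%d\n", free_txdesc(10, 10, 512));   /* idle ring: 511 */
        printf("%d\n", free_txdesc(10, 110, 512));  /* 100 queued: 411 */
        return 0;
}
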
static void *swap_buffer(void *bufaddr, int len)
{
int i;
@@ -307,33 +330,133 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
if (unlikely(skb_cow_head(skb, 0)))
return -1;
+ ip_hdr(skb)->check = 0;
*(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
return 0;
}
-static netdev_tx_t
-fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+static void
+fec_enet_submit_work(struct bufdesc *bdp, struct fec_enet_private *fep)
+{
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ struct bufdesc *bdp_pre;
+
+ bdp_pre = fec_enet_get_prevdesc(bdp, fep);
+ if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
+ !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
+ fep->delay_work.trig_tx = true;
+ schedule_delayed_work(&(fep->delay_work.delay_work),
+ msecs_to_jiffies(1));
+ }
+}
+
+static int
+fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
- struct bufdesc *bdp, *bdp_pre;
- void *bufaddr;
- unsigned short status;
+ struct bufdesc *bdp = fep->cur_tx;
+ struct bufdesc_ex *ebdp;
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ int frag, frag_len;
+ unsigned short status;
+ unsigned int estatus = 0;
+ skb_frag_t *this_frag;
unsigned int index;
+ void *bufaddr;
+ int i;
- /* Fill in a Tx ring entry */
+ for (frag = 0; frag < nr_frags; frag++) {
+ this_frag = &skb_shinfo(skb)->frags[frag];
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ ebdp = (struct bufdesc_ex *)bdp;
+
+ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+ frag_len = skb_shinfo(skb)->frags[frag].size;
+
+ /* Handle the last BD specially */
+ if (frag == nr_frags - 1) {
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex) {
+ estatus |= BD_ENET_TX_INT;
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+ estatus |= BD_ENET_TX_TS;
+ }
+ }
+
+ if (fep->bufdesc_ex) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
+ }
+
+ bufaddr = page_address(this_frag->page.p) + this_frag->page_offset;
+
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(fep->tx_bounce[index], bufaddr, frag_len);
+ bufaddr = fep->tx_bounce[index];
+
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, frag_len);
+ }
+
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
+ frag_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ goto dma_mapping_error;
+ }
+
+ bdp->cbd_datlen = frag_len;
+ bdp->cbd_sc = status;
+ }
+
+ fep->cur_tx = bdp;
+
+ return 0;
+
+dma_mapping_error:
bdp = fep->cur_tx;
+ for (i = 0; i < frag; i++) {
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ bdp->cbd_datlen, DMA_TO_DEVICE);
+ }
+ return NETDEV_TX_OK;
+}
- status = bdp->cbd_sc;
+static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int nr_frags = skb_shinfo(skb)->nr_frags;
+ struct bufdesc *bdp, *last_bdp;
+ void *bufaddr;
+ unsigned short status;
+ unsigned short buflen;
+ unsigned int estatus = 0;
+ unsigned int index;
+ int entries_free;
+ int ret;
- if (status & BD_ENET_TX_READY) {
- /* Ooops. All transmit buffers are full. Bail out.
- * This should not happen, since ndev->tbusy should be set.
- */
- netdev_err(ndev, "tx queue full!\n");
- return NETDEV_TX_BUSY;
+ entries_free = fec_enet_get_free_txdesc_num(fep);
+ if (entries_free < MAX_SKB_FRAGS + 1) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "NOT enough BD for SG!\n");
+ return NETDEV_TX_OK;
}
/* Protocol checksum off-load for TCP and UDP. */
@@ -342,102 +465,300 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
- /* Clear all of the status flags */
+ /* Fill in a Tx ring entry */
+ bdp = fep->cur_tx;
+ status = bdp->cbd_sc;
status &= ~BD_ENET_TX_STATS;
/* Set buffer length and buffer pointer */
bufaddr = skb->data;
- bdp->cbd_datlen = skb->len;
-
- /*
- * On some FEC implementations data must be aligned on
- * 4-byte boundaries. Use bounce buffers to copy data
- * and get it aligned. Ugh.
- */
- if (fep->bufdesc_ex)
- index = (struct bufdesc_ex *)bdp -
- (struct bufdesc_ex *)fep->tx_bd_base;
- else
- index = bdp - fep->tx_bd_base;
+ buflen = skb_headlen(skb);
- if (((unsigned long) bufaddr) & FEC_ALIGNMENT) {
- memcpy(fep->tx_bounce[index], skb->data, skb->len);
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(fep->tx_bounce[index], skb->data, buflen);
bufaddr = fep->tx_bounce[index];
- }
- /*
- * Some design made an incorrect assumption on endian mode of
- * the system that it's running on. As the result, driver has to
- * swap every frame going to and coming from the controller.
- */
- if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
- swap_buffer(bufaddr, skb->len);
-
- /* Save skb pointer */
- fep->tx_skbuff[index] = skb;
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, buflen);
+ }
/* Push the data cache so the CPM does not get stale memory
* data.
*/
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
- skb->len, DMA_TO_DEVICE);
+ buflen, DMA_TO_DEVICE);
if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
- bdp->cbd_bufaddr = 0;
- fep->tx_skbuff[index] = NULL;
dev_kfree_skb_any(skb);
if (net_ratelimit())
netdev_err(ndev, "Tx DMA memory map failed\n");
return NETDEV_TX_OK;
}
+ if (nr_frags) {
+ ret = fec_enet_txq_submit_frag_skb(skb, ndev);
+ if (ret)
+ return ret;
+ } else {
+ status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+ if (fep->bufdesc_ex) {
+ estatus = BD_ENET_TX_INT;
+ if (unlikely(skb_shinfo(skb)->tx_flags &
+ SKBTX_HW_TSTAMP && fep->hwts_tx_en))
+ estatus |= BD_ENET_TX_TS;
+ }
+ }
+
if (fep->bufdesc_ex) {
struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
- ebdp->cbd_bdu = 0;
+
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP &&
- fep->hwts_tx_en)) {
- ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT);
+ fep->hwts_tx_en))
skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
- } else {
- ebdp->cbd_esc = BD_ENET_TX_INT;
- /* Enable protocol checksum flags
- * We do not bother with the IP Checksum bits as they
- * are done by the kernel
- */
- if (skb->ip_summed == CHECKSUM_PARTIAL)
- ebdp->cbd_esc |= BD_ENET_TX_PINS;
- }
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
}
+ last_bdp = fep->cur_tx;
+ index = fec_enet_get_bd_index(fep->tx_bd_base, last_bdp, fep);
+ /* Save skb pointer */
+ fep->tx_skbuff[index] = skb;
+
+ bdp->cbd_datlen = buflen;
+
/* Send it on its way. Tell FEC it's ready, interrupt when done,
* it's the last BD of the frame, and to put the CRC on the end.
*/
- status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
- | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
bdp->cbd_sc = status;
- bdp_pre = fec_enet_get_prevdesc(bdp, fep);
- if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
- !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
- fep->delay_work.trig_tx = true;
- schedule_delayed_work(&(fep->delay_work.delay_work),
- msecs_to_jiffies(1));
- }
+ fec_enet_submit_work(bdp, fep);
/* If this was the last BD in the ring, start at the beginning again. */
- bdp = fec_enet_get_nextdesc(bdp, fep);
+ bdp = fec_enet_get_nextdesc(last_bdp, fep);
skb_tx_timestamp(skb);
fep->cur_tx = bdp;
- if (fep->cur_tx == fep->dirty_tx)
- netif_stop_queue(ndev);
+ /* Trigger transmission start */
+ writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+
+ return 0;
+}
+
+static int
+fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
+ struct bufdesc *bdp, int index, char *data,
+ int size, bool last_tcp, bool is_last)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ unsigned short status;
+ unsigned int estatus = 0;
+
+ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+ bdp->cbd_datlen = size;
+
+ if (((unsigned long) data) & FEC_ALIGNMENT ||
+ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(fep->tx_bounce[index], data, size);
+ data = fep->tx_bounce[index];
+
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(data, size);
+ }
+
+ bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
+ size, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_BUSY;
+ }
+
+ if (fep->bufdesc_ex) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
+ }
+
+ /* Handle the last BD specially */
+ if (last_tcp)
+ status |= (BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ if (is_last) {
+ status |= BD_ENET_TX_INTR;
+ if (fep->bufdesc_ex)
+ ebdp->cbd_esc |= BD_ENET_TX_INT;
+ }
+
+ bdp->cbd_sc = status;
+
+ return 0;
+}
+
+static int
+fec_enet_txq_put_hdr_tso(struct sk_buff *skb, struct net_device *ndev,
+ struct bufdesc *bdp, int index)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ const struct platform_device_id *id_entry =
+ platform_get_device_id(fep->pdev);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+ void *bufaddr;
+ unsigned long dmabuf;
+ unsigned short status;
+ unsigned int estatus = 0;
+
+ status = bdp->cbd_sc;
+ status &= ~BD_ENET_TX_STATS;
+ status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
+
+ bufaddr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
+ dmabuf = fep->tso_hdrs_dma + index * TSO_HEADER_SIZE;
+ if (((unsigned long) bufaddr) & FEC_ALIGNMENT ||
+ id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
+ memcpy(fep->tx_bounce[index], skb->data, hdr_len);
+ bufaddr = fep->tx_bounce[index];
+
+ if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
+ swap_buffer(bufaddr, hdr_len);
+
+ dmabuf = dma_map_single(&fep->pdev->dev, bufaddr,
+ hdr_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&fep->pdev->dev, dmabuf)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "Tx DMA memory map failed\n");
+ return NETDEV_TX_BUSY;
+ }
+ }
+
+ bdp->cbd_bufaddr = dmabuf;
+ bdp->cbd_datlen = hdr_len;
+
+ if (fep->bufdesc_ex) {
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
+ ebdp->cbd_bdu = 0;
+ ebdp->cbd_esc = estatus;
+ }
+
+ bdp->cbd_sc = status;
+
+ return 0;
+}
+
+static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int total_len, data_left;
+ struct bufdesc *bdp = fep->cur_tx;
+ struct tso_t tso;
+ unsigned int index = 0;
+ int ret;
+
+ if (tso_count_descs(skb) >= fec_enet_get_free_txdesc_num(fep)) {
+ dev_kfree_skb_any(skb);
+ if (net_ratelimit())
+ netdev_err(ndev, "NOT enough BD for TSO!\n");
+ return NETDEV_TX_OK;
+ }
+
+ /* Protocol checksum off-load for TCP and UDP. */
+ if (fec_enet_clear_csum(skb, ndev)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = fep->tso_hdrs + index * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ ret = fec_enet_txq_put_hdr_tso(skb, ndev, bdp, index);
+ if (ret)
+ goto err_release;
+
+ while (data_left > 0) {
+ int size;
+
+ size = min_t(int, tso.size, data_left);
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
+ ret = fec_enet_txq_put_data_tso(skb, ndev, bdp, index, tso.data,
+ size, size == data_left,
+ total_len == 0);
+ if (ret)
+ goto err_release;
+
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ }
+
+ /* Save skb pointer */
+ fep->tx_skbuff[index] = skb;
+
+ fec_enet_submit_work(bdp, fep);
+
+ skb_tx_timestamp(skb);
+ fep->cur_tx = bdp;
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
+ return 0;
+
+err_release:
+ /* TODO: Release all used data descriptors for TSO */
+ return ret;
+}
+
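
fec_enet_txq_submit_tso() leans on the new net/tso.h helpers (tso_start/tso_build_hdr/tso_build_data) and budgets descriptors up front with tso_count_descs(). The budget arithmetic is simple enough to show standalone, as a hedged model (FEC_MAX_SKB_DESCS caps it at two descriptors per segment plus MAX_SKB_FRAGS):

#include <stdio.h>

#define MAX_TSO_SEGS  100   /* mirrors FEC_MAX_TSO_SEGS */
#define MAX_SKB_FRAGS 17    /* typical kernel value */

/* Each gso_size-sized segment needs one descriptor for its rebuilt
 * MAC/IP/TCP header plus at least one for its payload slice. */
static int tso_segments(int payload_len, int gso_size)
{
        return (payload_len + gso_size - 1) / gso_size;
}

int main(void)
{
        int segs  = tso_segments(64000, 1448);  /* 45 segments */
        int worst = MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS;

        printf("%d segs, worst-case budget %d descriptors\n", segs, worst);
        return 0;
}
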
+static netdev_tx_t
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int entries_free;
+ int ret;
+
+ if (skb_is_gso(skb))
+ ret = fec_enet_txq_submit_tso(skb, ndev);
+ else
+ ret = fec_enet_txq_submit_skb(skb, ndev);
+ if (ret)
+ return ret;
+
+ entries_free = fec_enet_get_free_txdesc_num(fep);
+ if (entries_free <= fep->tx_stop_threshold)
+ netif_stop_queue(ndev);
+
return NETDEV_TX_OK;
}
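
The rewritten xmit path replaces the old "tx queue full" bailout with thresholds set in fec_enet_init(): the queue stops when fewer descriptors remain than a worst-case skb could need, and the TX-completion path wakes it once at least tx_wake_threshold are free. A hedged model with illustrative numbers:

#include <stdbool.h>
#include <stdio.h>

#define RING_SIZE      512
#define STOP_THRESHOLD 217   /* stand-in for FEC_MAX_SKB_DESCS */
#define WAKE_THRESHOLD ((RING_SIZE - STOP_THRESHOLD) / 2)

static bool stopped;

static void after_xmit(int entries_free)
{
        if (entries_free <= STOP_THRESHOLD)
                stopped = true;                /* netif_stop_queue() */
}

static void after_tx_clean(int entries_free)
{
        if (stopped && entries_free >= WAKE_THRESHOLD)
                stopped = false;               /* netif_wake_queue() */
}

int main(void)
{
        after_xmit(200);
        printf("stopped=%d\n", stopped);       /* 1 */
        after_tx_clean(80);
        printf("stopped=%d\n", stopped);       /* still 1: 80 < 147 */
        after_tx_clean(160);
        printf("stopped=%d\n", stopped);       /* 0 */
        return 0;
}
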
@@ -756,6 +1077,7 @@ fec_enet_tx(struct net_device *ndev)
unsigned short status;
struct sk_buff *skb;
int index = 0;
+ int entries_free;
fep = netdev_priv(ndev);
bdp = fep->dirty_tx;
@@ -769,16 +1091,17 @@ fec_enet_tx(struct net_device *ndev)
if (bdp == fep->cur_tx)
break;
- if (fep->bufdesc_ex)
- index = (struct bufdesc_ex *)bdp -
- (struct bufdesc_ex *)fep->tx_bd_base;
- else
- index = bdp - fep->tx_bd_base;
+ index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
skb = fep->tx_skbuff[index];
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
- DMA_TO_DEVICE);
+ if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
+ bdp->cbd_datlen, DMA_TO_DEVICE);
bdp->cbd_bufaddr = 0;
+ if (!skb) {
+ bdp = fec_enet_get_nextdesc(bdp, fep);
+ continue;
+ }
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -797,7 +1120,7 @@ fec_enet_tx(struct net_device *ndev)
ndev->stats.tx_carrier_errors++;
} else {
ndev->stats.tx_packets++;
- ndev->stats.tx_bytes += bdp->cbd_datlen;
+ ndev->stats.tx_bytes += skb->len;
}
if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
@@ -834,15 +1157,15 @@ fec_enet_tx(struct net_device *ndev)
/* Since we have freed up a buffer, the ring is no longer full
*/
- if (fep->dirty_tx != fep->cur_tx) {
- if (netif_queue_stopped(ndev))
+ if (netif_queue_stopped(ndev)) {
+ entries_free = fec_enet_get_free_txdesc_num(fep);
+ if (entries_free >= fep->tx_wake_threshold)
netif_wake_queue(ndev);
}
}
return;
}
-
/* During a receive, the cur_rx points to the current incoming buffer.
* When we update through the ring, if the next incoming buffer has
* not been given to the system, we just set the empty indicator,
@@ -920,11 +1243,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
pkt_len = bdp->cbd_datlen;
ndev->stats.rx_bytes += pkt_len;
- if (fep->bufdesc_ex)
- index = (struct bufdesc_ex *)bdp -
- (struct bufdesc_ex *)fep->rx_bd_base;
- else
- index = bdp - fep->rx_bd_base;
+ index = fec_enet_get_bd_index(fep->rx_bd_base, bdp, fep);
data = fep->rx_skbuff[index]->data;
dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
@@ -1255,6 +1574,49 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
return 0;
}
+static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+{
+ struct fec_enet_private *fep = netdev_priv(ndev);
+ int ret;
+
+ if (enable) {
+ ret = clk_prepare_enable(fep->clk_ahb);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(fep->clk_ipg);
+ if (ret)
+ goto failed_clk_ipg;
+ if (fep->clk_enet_out) {
+ ret = clk_prepare_enable(fep->clk_enet_out);
+ if (ret)
+ goto failed_clk_enet_out;
+ }
+ if (fep->clk_ptp) {
+ ret = clk_prepare_enable(fep->clk_ptp);
+ if (ret)
+ goto failed_clk_ptp;
+ }
+ } else {
+ clk_disable_unprepare(fep->clk_ahb);
+ clk_disable_unprepare(fep->clk_ipg);
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+ if (fep->clk_ptp)
+ clk_disable_unprepare(fep->clk_ptp);
+ }
+
+ return 0;
+failed_clk_ptp:
+ if (fep->clk_enet_out)
+ clk_disable_unprepare(fep->clk_enet_out);
+failed_clk_enet_out:
+ clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+ clk_disable_unprepare(fep->clk_ahb);
+
+ return ret;
+}
+
static int fec_enet_mii_probe(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
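
fec_enet_clk_enable() centralizes the enable/disable sequences and unwinds partial failures through fall-through labels in reverse acquisition order. A compilable skeleton of the pattern with placeholder resources; the real helper additionally handles the optional enet_out clock.

/* Build: cc -o unwind unwind.c */
#include <stdio.h>

static int enable(const char *name, int fail)
{
	if (fail) {
		printf("enable %s: failed\n", name);
		return -1;
	}
	printf("enable %s\n", name);
	return 0;
}

static void disable(const char *name) { printf("disable %s\n", name); }

static int clocks_enable(int fail_at)
{
	int ret;

	ret = enable("ahb", fail_at == 0);
	if (ret)
		return ret;
	ret = enable("ipg", fail_at == 1);
	if (ret)
		goto failed_ipg;
	ret = enable("ptp", fail_at == 2);
	if (ret)
		goto failed_ptp;
	return 0;

failed_ptp:
	disable("ipg");		/* release in reverse order */
failed_ipg:
	disable("ahb");
	return ret;
}

int main(void)
{
	clocks_enable(2);	/* exercise the ptp failure path */
	return 0;
}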
@@ -1364,7 +1726,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* Reference Manual has an error on this, and gets fixed on i.MX6Q
* document.
*/
- fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
+ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 5000000);
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
fep->phy_speed--;
fep->phy_speed <<= 1;
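
The one-line fix above derives the MDIO divider from the IPG clock that actually feeds the block. A worked example for an assumed 66 MHz IPG clock, using the common FEC relation MDC = ipg / ((SPEED + 1) * 2); the extra decrement models the FEC_QUIRK_ENET_MAC branch.

/* Build: cc -o mscr mscr.c */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long ipg = 66000000;			/* assumed rate */
	unsigned long field = DIV_ROUND_UP(ipg, 5000000) - 1;
	unsigned long mscr = field << 1;		/* bits [6:1] */

	printf("MII_SPEED field = %lu, MSCR = 0x%lx, MDC = %lu Hz\n",
	       field, mscr, ipg / ((field + 1) * 2));
	return 0;
}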
@@ -1773,6 +2135,11 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ ret = fec_enet_clk_enable(ndev, true);
+ if (ret)
+ return ret;
+
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -1811,6 +2178,8 @@ fec_enet_close(struct net_device *ndev)
phy_disconnect(fep->phy_dev);
}
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
fec_enet_free_buffers(ndev);
return 0;
@@ -1988,13 +2357,35 @@ static int fec_enet_init(struct net_device *ndev)
const struct platform_device_id *id_entry =
platform_get_device_id(fep->pdev);
struct bufdesc *cbd_base;
+ int bd_size;
+
+ /* init the tx & rx ring size */
+ fep->tx_ring_size = TX_RING_SIZE;
+ fep->rx_ring_size = RX_RING_SIZE;
+
+ fep->tx_stop_threshold = FEC_MAX_SKB_DESCS;
+ fep->tx_wake_threshold = (fep->tx_ring_size - fep->tx_stop_threshold) / 2;
+
+ if (fep->bufdesc_ex)
+ fep->bufdesc_size = sizeof(struct bufdesc_ex);
+ else
+ fep->bufdesc_size = sizeof(struct bufdesc);
+ bd_size = (fep->tx_ring_size + fep->rx_ring_size) *
+ fep->bufdesc_size;
/* Allocate memory for buffer descriptors. */
- cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
+ cbd_base = dma_alloc_coherent(NULL, bd_size, &fep->bd_dma,
GFP_KERNEL);
if (!cbd_base)
return -ENOMEM;
+ fep->tso_hdrs = dma_alloc_coherent(NULL, fep->tx_ring_size * TSO_HEADER_SIZE,
+ &fep->tso_hdrs_dma, GFP_KERNEL);
+ if (!fep->tso_hdrs) {
+ dma_free_coherent(NULL, bd_size, cbd_base, fep->bd_dma);
+ return -ENOMEM;
+ }
+
memset(cbd_base, 0, PAGE_SIZE);
fep->netdev = ndev;
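
The descriptor area is now sized from the ring lengths rather than assumed to fit one page, and a second coherent area holds one TSO header slot per TX descriptor. A quick check of that arithmetic, with assumed ring depths, a 32-byte extended descriptor, and the 128-byte TSO_HEADER_SIZE from <net/tso.h>; the totals easily exceed a single 4 KiB page.

#include <stdio.h>

int main(void)
{
	unsigned tx = 512, rx = 512;	/* assumed TX/RX ring depths */
	unsigned desc = 32;		/* assumed sizeof(struct bufdesc_ex) */
	unsigned tso_hdr = 128;		/* TSO_HEADER_SIZE */

	printf("descriptor area: %u bytes\n", (tx + rx) * desc);
	printf("TSO header area: %u bytes\n", tx * tso_hdr);
	return 0;
}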
@@ -2004,10 +2395,6 @@ static int fec_enet_init(struct net_device *ndev)
/* make sure MAC we just acquired is programmed into the hw */
fec_set_mac_address(ndev, NULL);
- /* init the tx & rx ring size */
- fep->tx_ring_size = TX_RING_SIZE;
- fep->rx_ring_size = RX_RING_SIZE;
-
/* Set receive and transmit descriptor base. */
fep->rx_bd_base = cbd_base;
if (fep->bufdesc_ex)
@@ -2024,21 +2411,21 @@ static int fec_enet_init(struct net_device *ndev)
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT);
- if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) {
+ if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN)
/* enable hw VLAN support */
ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
- ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
- }
if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) {
+ ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
+
/* enable hw accelerator */
ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_RXCSUM);
- ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
- | NETIF_F_RXCSUM);
+ | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
}
+ ndev->hw_features = ndev->features;
+
fec_restart(ndev, 0);
return 0;
@@ -2114,6 +2501,9 @@ fec_probe(struct platform_device *pdev)
fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
#endif
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(&pdev->dev);
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
fep->hwp = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(fep->hwp)) {
@@ -2164,26 +2554,10 @@ fec_probe(struct platform_device *pdev)
fep->bufdesc_ex = 0;
}
- ret = clk_prepare_enable(fep->clk_ahb);
+ ret = fec_enet_clk_enable(ndev, true);
if (ret)
goto failed_clk;
- ret = clk_prepare_enable(fep->clk_ipg);
- if (ret)
- goto failed_clk_ipg;
-
- if (fep->clk_enet_out) {
- ret = clk_prepare_enable(fep->clk_enet_out);
- if (ret)
- goto failed_clk_enet_out;
- }
-
- if (fep->clk_ptp) {
- ret = clk_prepare_enable(fep->clk_ptp);
- if (ret)
- goto failed_clk_ptp;
- }
-
fep->reg_phy = devm_regulator_get(&pdev->dev, "phy");
if (!IS_ERR(fep->reg_phy)) {
ret = regulator_enable(fep->reg_phy);
@@ -2225,6 +2599,8 @@ fec_probe(struct platform_device *pdev)
/* Carrier starts down, phylib will bring it up */
netif_carrier_off(ndev);
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&pdev->dev);
ret = register_netdev(ndev);
if (ret)
@@ -2244,15 +2620,7 @@ failed_init:
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
failed_regulator:
- if (fep->clk_ptp)
- clk_disable_unprepare(fep->clk_ptp);
-failed_clk_ptp:
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
- clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
- clk_disable_unprepare(fep->clk_ahb);
+ fec_enet_clk_enable(ndev, false);
failed_clk:
failed_ioremap:
free_netdev(ndev);
@@ -2272,14 +2640,9 @@ fec_drv_remove(struct platform_device *pdev)
del_timer_sync(&fep->time_keep);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
- if (fep->clk_ptp)
- clk_disable_unprepare(fep->clk_ptp);
if (fep->ptp_clock)
ptp_clock_unregister(fep->ptp_clock);
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ipg);
- clk_disable_unprepare(fep->clk_ahb);
+ fec_enet_clk_enable(ndev, false);
free_netdev(ndev);
return 0;
@@ -2296,12 +2659,8 @@ fec_suspend(struct device *dev)
fec_stop(ndev);
netif_device_detach(ndev);
}
- if (fep->clk_ptp)
- clk_disable_unprepare(fep->clk_ptp);
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
- clk_disable_unprepare(fep->clk_ipg);
- clk_disable_unprepare(fep->clk_ahb);
+ fec_enet_clk_enable(ndev, false);
+ pinctrl_pm_select_sleep_state(&fep->pdev->dev);
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
@@ -2322,25 +2681,10 @@ fec_resume(struct device *dev)
return ret;
}
- ret = clk_prepare_enable(fep->clk_ahb);
+ pinctrl_pm_select_default_state(&fep->pdev->dev);
+ ret = fec_enet_clk_enable(ndev, true);
if (ret)
- goto failed_clk_ahb;
-
- ret = clk_prepare_enable(fep->clk_ipg);
- if (ret)
- goto failed_clk_ipg;
-
- if (fep->clk_enet_out) {
- ret = clk_prepare_enable(fep->clk_enet_out);
- if (ret)
- goto failed_clk_enet_out;
- }
-
- if (fep->clk_ptp) {
- ret = clk_prepare_enable(fep->clk_ptp);
- if (ret)
- goto failed_clk_ptp;
- }
+ goto failed_clk;
if (netif_running(ndev)) {
fec_restart(ndev, fep->full_duplex);
@@ -2349,14 +2693,7 @@ fec_resume(struct device *dev)
return 0;
-failed_clk_ptp:
- if (fep->clk_enet_out)
- clk_disable_unprepare(fep->clk_enet_out);
-failed_clk_enet_out:
- clk_disable_unprepare(fep->clk_ipg);
-failed_clk_ipg:
- clk_disable_unprepare(fep->clk_ahb);
-failed_clk_ahb:
+failed_clk:
if (fep->reg_phy)
regulator_disable(fep->reg_phy);
return ret;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index dc80db4..cfaf17b 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -792,10 +792,6 @@ static int fs_init_phy(struct net_device *dev)
phydev = of_phy_connect(dev, fep->fpi->phy_node, &fs_adjust_link, 0,
iface);
if (!phydev) {
- phydev = of_phy_connect_fixed_link(dev, &fs_adjust_link,
- iface);
- }
- if (!phydev) {
dev_err(&dev->dev, "Could not attach to PHY\n");
return -ENODEV;
}
@@ -1029,9 +1025,16 @@ static int fs_enet_probe(struct platform_device *ofdev)
fpi->use_napi = 1;
fpi->napi_weight = 17;
fpi->phy_node = of_parse_phandle(ofdev->dev.of_node, "phy-handle", 0);
- if ((!fpi->phy_node) && (!of_get_property(ofdev->dev.of_node, "fixed-link",
- NULL)))
- goto out_free_fpi;
+ if (!fpi->phy_node && of_phy_is_fixed_link(ofdev->dev.of_node)) {
+ err = of_phy_register_fixed_link(ofdev->dev.of_node);
+ if (err)
+ goto out_free_fpi;
+
+ /* In the case of a fixed PHY, the DT node associated
+		 * with the PHY is the Ethernet MAC DT node.
+ */
+ fpi->phy_node = ofdev->dev.of_node;
+ }
if (of_device_is_compatible(ofdev->dev.of_node, "fsl,mpc5125-fec")) {
phy_connection_type = of_get_property(ofdev->dev.of_node,
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ee6ddbd..a6cf40e 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -889,6 +889,17 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ /* In the case of a fixed PHY, the DT node associated
+	 * with the PHY is the Ethernet MAC DT node.
+ */
+ if (of_phy_is_fixed_link(np)) {
+ err = of_phy_register_fixed_link(np);
+ if (err)
+ goto err_grp_init;
+
+ priv->phy_node = np;
+ }
+
/* Find the TBI PHY. If it's not there, we don't support SGMII */
priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
@@ -1231,7 +1242,7 @@ static void gfar_hw_init(struct gfar_private *priv)
gfar_write_isrg(priv);
}
-static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
+static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
struct gfar __iomem *regs = priv->gfargrp[0].regs;
@@ -1373,6 +1384,9 @@ static int gfar_probe(struct platform_device *ofdev)
gfar_hw_init(priv);
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
err = register_netdev(dev);
if (err) {
@@ -1380,9 +1394,6 @@ static int gfar_probe(struct platform_device *ofdev)
goto register_fail;
}
- /* Carrier starts down, phylib will bring it up */
- netif_carrier_off(dev);
-
device_init_wakeup(&dev->dev,
priv->device_flags &
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
@@ -1660,9 +1671,6 @@ static int init_phy(struct net_device *dev)
priv->phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
interface);
- if (!priv->phydev)
- priv->phydev = of_phy_connect_fixed_link(dev, &adjust_link,
- interface);
if (!priv->phydev) {
dev_err(&dev->dev, "could not attach to PHY\n");
return -ENODEV;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index c8299c3..fab39e2 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -1728,9 +1728,6 @@ static int init_phy(struct net_device *dev)
phydev = of_phy_connect(dev, ug_info->phy_node, &adjust_link, 0,
priv->phy_interface);
- if (!phydev)
- phydev = of_phy_connect_fixed_link(dev, &adjust_link,
- priv->phy_interface);
if (!phydev) {
dev_err(&dev->dev, "Could not attach to PHY\n");
return -ENODEV;
@@ -3790,6 +3787,17 @@ static int ucc_geth_probe(struct platform_device* ofdev)
ug_info->uf_info.irq = irq_of_parse_and_map(np, 0);
ug_info->phy_node = of_parse_phandle(np, "phy-handle", 0);
+ if (!ug_info->phy_node) {
+ /* In the case of a fixed PHY, the DT node associated
+		 * with the PHY is the Ethernet MAC DT node.
+ */
+ if (of_phy_is_fixed_link(np)) {
+ err = of_phy_register_fixed_link(np);
+ if (err)
+ return err;
+ }
+ ug_info->phy_node = np;
+ }
/* Find the TBI PHY node. If it's not there, we don't support SGMII */
ug_info->tbi_node = of_parse_phandle(np, "tbi-handle", 0);
diff --git a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
index 413329e..cc83350 100644
--- a/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
+++ b/drivers/net/ethernet/freescale/ucc_geth_ethtool.c
@@ -417,5 +417,5 @@ static const struct ethtool_ops uec_ethtool_ops = {
void uec_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &uec_ethtool_ops);
+ netdev->ethtool_ops = &uec_ethtool_ops;
}
diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c
index d449fcb9..0c9d55c 100644
--- a/drivers/net/ethernet/freescale/xgmac_mdio.c
+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c
@@ -162,7 +162,9 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
/* Return all Fs if nothing was there */
if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
- dev_err(&bus->dev, "MDIO read error\n");
+ dev_err(&bus->dev,
+ "Error while reading PHY%d reg at %d.%d\n",
+ phy_id, dev_addr, regnum);
return 0xffff;
}
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index 7becab1..cfe7a74 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -256,7 +256,7 @@ static int fmvj18x_probe(struct pcmcia_device *link)
dev->netdev_ops = &fjn_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
return fmvj18x_config(link);
} /* fmvj18x_attach */
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
new file mode 100644
index 0000000..e942173
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -0,0 +1,27 @@
+#
+# HISILICON device configuration
+#
+
+config NET_VENDOR_HISILICON
+ bool "Hisilicon devices"
+ default y
+ depends on ARM
+ ---help---
+ If you have a network (Ethernet) card belonging to this class, say Y
+ and read the Ethernet-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>.
+
+ Note that the answer to this question doesn't directly affect the
+ kernel: saying N will just cause the configurator to skip all
+ the questions about Hisilicon devices. If you say Y, you will be asked
+ for your specific card in the following questions.
+
+if NET_VENDOR_HISILICON
+
+config HIX5HD2_GMAC
+ tristate "Hisilicon HIX5HD2 Family Network Device Support"
+ select PHYLIB
+ help
+ This selects the hix5hd2 mac family network device.
+
+endif # NET_VENDOR_HISILICON
diff --git a/drivers/net/ethernet/hisilicon/Makefile b/drivers/net/ethernet/hisilicon/Makefile
new file mode 100644
index 0000000..9175e846
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the HISILICON network device drivers.
+#
+
+obj-$(CONFIG_HIX5HD2_GMAC) += hix5hd2_gmac.o
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
new file mode 100644
index 0000000..0ffdcd3
--- /dev/null
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -0,0 +1,1066 @@
+/* Copyright (c) 2014 Linaro Ltd.
+ * Copyright (c) 2014 Hisilicon Limited.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+#include <linux/clk.h>
+#include <linux/circ_buf.h>
+
+#define STATION_ADDR_LOW 0x0000
+#define STATION_ADDR_HIGH 0x0004
+#define MAC_DUPLEX_HALF_CTRL 0x0008
+#define MAX_FRM_SIZE 0x003c
+#define PORT_MODE 0x0040
+#define PORT_EN 0x0044
+#define BITS_TX_EN BIT(2)
+#define BITS_RX_EN BIT(1)
+#define REC_FILT_CONTROL 0x0064
+#define BIT_CRC_ERR_PASS BIT(5)
+#define BIT_PAUSE_FRM_PASS BIT(4)
+#define BIT_VLAN_DROP_EN BIT(3)
+#define BIT_BC_DROP_EN BIT(2)
+#define BIT_MC_MATCH_EN BIT(1)
+#define BIT_UC_MATCH_EN BIT(0)
+#define PORT_MC_ADDR_LOW 0x0068
+#define PORT_MC_ADDR_HIGH 0x006C
+#define CF_CRC_STRIP 0x01b0
+#define MODE_CHANGE_EN 0x01b4
+#define BIT_MODE_CHANGE_EN BIT(0)
+#define COL_SLOT_TIME 0x01c0
+#define RECV_CONTROL 0x01e0
+#define BIT_STRIP_PAD_EN BIT(3)
+#define BIT_RUNT_PKT_EN BIT(4)
+#define CONTROL_WORD 0x0214
+#define MDIO_SINGLE_CMD 0x03c0
+#define MDIO_SINGLE_DATA 0x03c4
+#define MDIO_CTRL 0x03cc
+#define MDIO_RDATA_STATUS 0x03d0
+
+#define MDIO_START BIT(20)
+#define MDIO_R_VALID BIT(0)
+#define MDIO_READ (BIT(17) | MDIO_START)
+#define MDIO_WRITE (BIT(16) | MDIO_START)
+
+#define RX_FQ_START_ADDR 0x0500
+#define RX_FQ_DEPTH 0x0504
+#define RX_FQ_WR_ADDR 0x0508
+#define RX_FQ_RD_ADDR 0x050c
+#define RX_FQ_VLDDESC_CNT 0x0510
+#define RX_FQ_ALEMPTY_TH 0x0514
+#define RX_FQ_REG_EN 0x0518
+#define BITS_RX_FQ_START_ADDR_EN BIT(2)
+#define BITS_RX_FQ_DEPTH_EN BIT(1)
+#define BITS_RX_FQ_RD_ADDR_EN BIT(0)
+#define RX_FQ_ALFULL_TH 0x051c
+#define RX_BQ_START_ADDR 0x0520
+#define RX_BQ_DEPTH 0x0524
+#define RX_BQ_WR_ADDR 0x0528
+#define RX_BQ_RD_ADDR 0x052c
+#define RX_BQ_FREE_DESC_CNT 0x0530
+#define RX_BQ_ALEMPTY_TH 0x0534
+#define RX_BQ_REG_EN 0x0538
+#define BITS_RX_BQ_START_ADDR_EN BIT(2)
+#define BITS_RX_BQ_DEPTH_EN BIT(1)
+#define BITS_RX_BQ_WR_ADDR_EN BIT(0)
+#define RX_BQ_ALFULL_TH 0x053c
+#define TX_BQ_START_ADDR 0x0580
+#define TX_BQ_DEPTH 0x0584
+#define TX_BQ_WR_ADDR 0x0588
+#define TX_BQ_RD_ADDR 0x058c
+#define TX_BQ_VLDDESC_CNT 0x0590
+#define TX_BQ_ALEMPTY_TH 0x0594
+#define TX_BQ_REG_EN 0x0598
+#define BITS_TX_BQ_START_ADDR_EN BIT(2)
+#define BITS_TX_BQ_DEPTH_EN BIT(1)
+#define BITS_TX_BQ_RD_ADDR_EN BIT(0)
+#define TX_BQ_ALFULL_TH 0x059c
+#define TX_RQ_START_ADDR 0x05a0
+#define TX_RQ_DEPTH 0x05a4
+#define TX_RQ_WR_ADDR 0x05a8
+#define TX_RQ_RD_ADDR 0x05ac
+#define TX_RQ_FREE_DESC_CNT 0x05b0
+#define TX_RQ_ALEMPTY_TH 0x05b4
+#define TX_RQ_REG_EN 0x05b8
+#define BITS_TX_RQ_START_ADDR_EN BIT(2)
+#define BITS_TX_RQ_DEPTH_EN BIT(1)
+#define BITS_TX_RQ_WR_ADDR_EN BIT(0)
+#define TX_RQ_ALFULL_TH 0x05bc
+#define RAW_PMU_INT 0x05c0
+#define ENA_PMU_INT 0x05c4
+#define STATUS_PMU_INT 0x05c8
+#define MAC_FIFO_ERR_IN BIT(30)
+#define TX_RQ_IN_TIMEOUT_INT BIT(29)
+#define RX_BQ_IN_TIMEOUT_INT BIT(28)
+#define TXOUTCFF_FULL_INT BIT(27)
+#define TXOUTCFF_EMPTY_INT BIT(26)
+#define TXCFF_FULL_INT BIT(25)
+#define TXCFF_EMPTY_INT BIT(24)
+#define RXOUTCFF_FULL_INT BIT(23)
+#define RXOUTCFF_EMPTY_INT BIT(22)
+#define RXCFF_FULL_INT BIT(21)
+#define RXCFF_EMPTY_INT BIT(20)
+#define TX_RQ_IN_INT BIT(19)
+#define TX_BQ_OUT_INT BIT(18)
+#define RX_BQ_IN_INT BIT(17)
+#define RX_FQ_OUT_INT BIT(16)
+#define TX_RQ_EMPTY_INT BIT(15)
+#define TX_RQ_FULL_INT BIT(14)
+#define TX_RQ_ALEMPTY_INT BIT(13)
+#define TX_RQ_ALFULL_INT BIT(12)
+#define TX_BQ_EMPTY_INT BIT(11)
+#define TX_BQ_FULL_INT BIT(10)
+#define TX_BQ_ALEMPTY_INT BIT(9)
+#define TX_BQ_ALFULL_INT BIT(8)
+#define RX_BQ_EMPTY_INT BIT(7)
+#define RX_BQ_FULL_INT BIT(6)
+#define RX_BQ_ALEMPTY_INT BIT(5)
+#define RX_BQ_ALFULL_INT BIT(4)
+#define RX_FQ_EMPTY_INT BIT(3)
+#define RX_FQ_FULL_INT BIT(2)
+#define RX_FQ_ALEMPTY_INT BIT(1)
+#define RX_FQ_ALFULL_INT BIT(0)
+
+#define DEF_INT_MASK (RX_BQ_IN_INT | RX_BQ_IN_TIMEOUT_INT | \
+ TX_RQ_IN_INT | TX_RQ_IN_TIMEOUT_INT)
+
+#define DESC_WR_RD_ENA 0x05cc
+#define IN_QUEUE_TH 0x05d8
+#define OUT_QUEUE_TH 0x05dc
+#define QUEUE_TX_BQ_SHIFT 16
+#define RX_BQ_IN_TIMEOUT_TH 0x05e0
+#define TX_RQ_IN_TIMEOUT_TH 0x05e4
+#define STOP_CMD 0x05e8
+#define BITS_TX_STOP BIT(1)
+#define BITS_RX_STOP BIT(0)
+#define FLUSH_CMD			0x05ec
+#define BITS_TX_FLUSH_CMD BIT(5)
+#define BITS_RX_FLUSH_CMD BIT(4)
+#define BITS_TX_FLUSH_FLAG_DOWN BIT(3)
+#define BITS_TX_FLUSH_FLAG_UP BIT(2)
+#define BITS_RX_FLUSH_FLAG_DOWN BIT(1)
+#define BITS_RX_FLUSH_FLAG_UP BIT(0)
+#define RX_CFF_NUM_REG 0x05f0
+#define PMU_FSM_REG 0x05f8
+#define RX_FIFO_PKT_IN_NUM 0x05fc
+#define RX_FIFO_PKT_OUT_NUM 0x0600
+
+#define RGMII_SPEED_1000 0x2c
+#define RGMII_SPEED_100 0x2f
+#define RGMII_SPEED_10 0x2d
+#define MII_SPEED_100 0x0f
+#define MII_SPEED_10 0x0d
+#define GMAC_SPEED_1000 0x05
+#define GMAC_SPEED_100 0x01
+#define GMAC_SPEED_10 0x00
+#define GMAC_FULL_DUPLEX BIT(4)
+
+#define RX_BQ_INT_THRESHOLD 0x01
+#define TX_RQ_INT_THRESHOLD 0x01
+#define RX_BQ_IN_TIMEOUT 0x10000
+#define TX_RQ_IN_TIMEOUT 0x50000
+
+#define MAC_MAX_FRAME_SIZE 1600
+#define DESC_SIZE 32
+#define RX_DESC_NUM 1024
+#define TX_DESC_NUM 1024
+
+#define DESC_VLD_FREE 0
+#define DESC_VLD_BUSY 0x80000000
+#define DESC_FL_MID 0
+#define DESC_FL_LAST 0x20000000
+#define DESC_FL_FIRST 0x40000000
+#define DESC_FL_FULL 0x60000000
+#define DESC_DATA_LEN_OFF 16
+#define DESC_BUFF_LEN_OFF 0
+#define DESC_DATA_MASK 0x7ff
+
+/* DMA descriptor ring helpers */
+#define dma_ring_incr(n, s) (((n) + 1) & ((s) - 1))
+#define dma_cnt(n) ((n) >> 5)
+#define dma_byte(n) ((n) << 5)
+
+struct hix5hd2_desc {
+ __le32 buff_addr;
+ __le32 cmd;
+} __aligned(32);
+
+struct hix5hd2_desc_sw {
+ struct hix5hd2_desc *desc;
+ dma_addr_t phys_addr;
+ unsigned int count;
+ unsigned int size;
+};
+
+#define QUEUE_NUMS 4
+struct hix5hd2_priv {
+ struct hix5hd2_desc_sw pool[QUEUE_NUMS];
+#define rx_fq pool[0]
+#define rx_bq pool[1]
+#define tx_bq pool[2]
+#define tx_rq pool[3]
+
+ void __iomem *base;
+ void __iomem *ctrl_base;
+
+ struct sk_buff *tx_skb[TX_DESC_NUM];
+ struct sk_buff *rx_skb[RX_DESC_NUM];
+
+ struct device *dev;
+ struct net_device *netdev;
+
+ struct phy_device *phy;
+ struct device_node *phy_node;
+ phy_interface_t phy_mode;
+
+ unsigned int speed;
+ unsigned int duplex;
+
+ struct clk *clk;
+ struct mii_bus *bus;
+ struct napi_struct napi;
+ struct work_struct tx_timeout_task;
+};
+
+static void hix5hd2_config_port(struct net_device *dev, u32 speed, u32 duplex)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ u32 val;
+
+ priv->speed = speed;
+ priv->duplex = duplex;
+
+ switch (priv->phy_mode) {
+ case PHY_INTERFACE_MODE_RGMII:
+ if (speed == SPEED_1000)
+ val = RGMII_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = RGMII_SPEED_100;
+ else
+ val = RGMII_SPEED_10;
+ break;
+ case PHY_INTERFACE_MODE_MII:
+ if (speed == SPEED_100)
+ val = MII_SPEED_100;
+ else
+ val = MII_SPEED_10;
+ break;
+ default:
+		netdev_warn(dev, "unsupported interface mode\n");
+ val = MII_SPEED_10;
+ break;
+ }
+
+ if (duplex)
+ val |= GMAC_FULL_DUPLEX;
+ writel_relaxed(val, priv->ctrl_base);
+
+ writel_relaxed(BIT_MODE_CHANGE_EN, priv->base + MODE_CHANGE_EN);
+ if (speed == SPEED_1000)
+ val = GMAC_SPEED_1000;
+ else if (speed == SPEED_100)
+ val = GMAC_SPEED_100;
+ else
+ val = GMAC_SPEED_10;
+ writel_relaxed(val, priv->base + PORT_MODE);
+ writel_relaxed(0, priv->base + MODE_CHANGE_EN);
+ writel_relaxed(duplex, priv->base + MAC_DUPLEX_HALF_CTRL);
+}
+
+static void hix5hd2_set_desc_depth(struct hix5hd2_priv *priv, int rx, int tx)
+{
+ writel_relaxed(BITS_RX_FQ_DEPTH_EN, priv->base + RX_FQ_REG_EN);
+ writel_relaxed(rx << 3, priv->base + RX_FQ_DEPTH);
+ writel_relaxed(0, priv->base + RX_FQ_REG_EN);
+
+ writel_relaxed(BITS_RX_BQ_DEPTH_EN, priv->base + RX_BQ_REG_EN);
+ writel_relaxed(rx << 3, priv->base + RX_BQ_DEPTH);
+ writel_relaxed(0, priv->base + RX_BQ_REG_EN);
+
+ writel_relaxed(BITS_TX_BQ_DEPTH_EN, priv->base + TX_BQ_REG_EN);
+ writel_relaxed(tx << 3, priv->base + TX_BQ_DEPTH);
+ writel_relaxed(0, priv->base + TX_BQ_REG_EN);
+
+ writel_relaxed(BITS_TX_RQ_DEPTH_EN, priv->base + TX_RQ_REG_EN);
+ writel_relaxed(tx << 3, priv->base + TX_RQ_DEPTH);
+ writel_relaxed(0, priv->base + TX_RQ_REG_EN);
+}
+
+static void hix5hd2_set_rx_fq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_RX_FQ_START_ADDR_EN, priv->base + RX_FQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + RX_FQ_START_ADDR);
+ writel_relaxed(0, priv->base + RX_FQ_REG_EN);
+}
+
+static void hix5hd2_set_rx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_RX_BQ_START_ADDR_EN, priv->base + RX_BQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + RX_BQ_START_ADDR);
+ writel_relaxed(0, priv->base + RX_BQ_REG_EN);
+}
+
+static void hix5hd2_set_tx_bq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_TX_BQ_START_ADDR_EN, priv->base + TX_BQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + TX_BQ_START_ADDR);
+ writel_relaxed(0, priv->base + TX_BQ_REG_EN);
+}
+
+static void hix5hd2_set_tx_rq(struct hix5hd2_priv *priv, dma_addr_t phy_addr)
+{
+ writel_relaxed(BITS_TX_RQ_START_ADDR_EN, priv->base + TX_RQ_REG_EN);
+ writel_relaxed(phy_addr, priv->base + TX_RQ_START_ADDR);
+ writel_relaxed(0, priv->base + TX_RQ_REG_EN);
+}
+
+static void hix5hd2_set_desc_addr(struct hix5hd2_priv *priv)
+{
+ hix5hd2_set_rx_fq(priv, priv->rx_fq.phys_addr);
+ hix5hd2_set_rx_bq(priv, priv->rx_bq.phys_addr);
+ hix5hd2_set_tx_rq(priv, priv->tx_rq.phys_addr);
+ hix5hd2_set_tx_bq(priv, priv->tx_bq.phys_addr);
+}
+
+static void hix5hd2_hw_init(struct hix5hd2_priv *priv)
+{
+ u32 val;
+
+ /* disable and clear all interrupts */
+ writel_relaxed(0, priv->base + ENA_PMU_INT);
+ writel_relaxed(~0, priv->base + RAW_PMU_INT);
+
+ writel_relaxed(BIT_CRC_ERR_PASS, priv->base + REC_FILT_CONTROL);
+ writel_relaxed(MAC_MAX_FRAME_SIZE, priv->base + CONTROL_WORD);
+ writel_relaxed(0, priv->base + COL_SLOT_TIME);
+
+ val = RX_BQ_INT_THRESHOLD | TX_RQ_INT_THRESHOLD << QUEUE_TX_BQ_SHIFT;
+ writel_relaxed(val, priv->base + IN_QUEUE_TH);
+
+ writel_relaxed(RX_BQ_IN_TIMEOUT, priv->base + RX_BQ_IN_TIMEOUT_TH);
+ writel_relaxed(TX_RQ_IN_TIMEOUT, priv->base + TX_RQ_IN_TIMEOUT_TH);
+
+ hix5hd2_set_desc_depth(priv, RX_DESC_NUM, TX_DESC_NUM);
+ hix5hd2_set_desc_addr(priv);
+}
+
+static void hix5hd2_irq_enable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(DEF_INT_MASK, priv->base + ENA_PMU_INT);
+}
+
+static void hix5hd2_irq_disable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(0, priv->base + ENA_PMU_INT);
+}
+
+static void hix5hd2_port_enable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(0xf, priv->base + DESC_WR_RD_ENA);
+ writel_relaxed(BITS_RX_EN | BITS_TX_EN, priv->base + PORT_EN);
+}
+
+static void hix5hd2_port_disable(struct hix5hd2_priv *priv)
+{
+ writel_relaxed(~(BITS_RX_EN | BITS_TX_EN), priv->base + PORT_EN);
+ writel_relaxed(0, priv->base + DESC_WR_RD_ENA);
+}
+
+static void hix5hd2_hw_set_mac_addr(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ unsigned char *mac = dev->dev_addr;
+ u32 val;
+
+ val = mac[1] | (mac[0] << 8);
+ writel_relaxed(val, priv->base + STATION_ADDR_HIGH);
+
+ val = mac[5] | (mac[4] << 8) | (mac[3] << 16) | (mac[2] << 24);
+ writel_relaxed(val, priv->base + STATION_ADDR_LOW);
+}
+
+static int hix5hd2_net_set_mac_address(struct net_device *dev, void *p)
+{
+ int ret;
+
+ ret = eth_mac_addr(dev, p);
+ if (!ret)
+ hix5hd2_hw_set_mac_addr(dev);
+
+ return ret;
+}
+
+static void hix5hd2_adjust_link(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct phy_device *phy = priv->phy;
+
+ if ((priv->speed != phy->speed) || (priv->duplex != phy->duplex)) {
+ hix5hd2_config_port(dev, phy->speed, phy->duplex);
+ phy_print_status(phy);
+ }
+}
+
+static void hix5hd2_rx_refill(struct hix5hd2_priv *priv)
+{
+ struct hix5hd2_desc *desc;
+ struct sk_buff *skb;
+ u32 start, end, num, pos, i;
+ u32 len = MAC_MAX_FRAME_SIZE;
+ dma_addr_t addr;
+
+ /* software write pointer */
+ start = dma_cnt(readl_relaxed(priv->base + RX_FQ_WR_ADDR));
+ /* logic read pointer */
+ end = dma_cnt(readl_relaxed(priv->base + RX_FQ_RD_ADDR));
+ num = CIRC_SPACE(start, end, RX_DESC_NUM);
+
+ for (i = 0, pos = start; i < num; i++) {
+ if (priv->rx_skb[pos]) {
+ break;
+ } else {
+ skb = netdev_alloc_skb_ip_align(priv->netdev, len);
+ if (unlikely(skb == NULL))
+ break;
+ }
+
+ addr = dma_map_single(priv->dev, skb->data, len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(priv->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
+
+ desc = priv->rx_fq.desc + pos;
+ desc->buff_addr = cpu_to_le32(addr);
+ priv->rx_skb[pos] = skb;
+ desc->cmd = cpu_to_le32(DESC_VLD_FREE |
+ (len - 1) << DESC_BUFF_LEN_OFF);
+ pos = dma_ring_incr(pos, RX_DESC_NUM);
+ }
+
+ /* ensure desc updated */
+ wmb();
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + RX_FQ_WR_ADDR);
+}
+
+static int hix5hd2_rx(struct net_device *dev, int limit)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct sk_buff *skb;
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ u32 start, end, num, pos, i, len;
+
+ /* software read pointer */
+ start = dma_cnt(readl_relaxed(priv->base + RX_BQ_RD_ADDR));
+ /* logic write pointer */
+ end = dma_cnt(readl_relaxed(priv->base + RX_BQ_WR_ADDR));
+ num = CIRC_CNT(end, start, RX_DESC_NUM);
+ if (num > limit)
+ num = limit;
+
+ /* ensure get updated desc */
+ rmb();
+ for (i = 0, pos = start; i < num; i++) {
+ skb = priv->rx_skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "inconsistent rx_skb\n");
+ break;
+ }
+ priv->rx_skb[pos] = NULL;
+
+ desc = priv->rx_bq.desc + pos;
+ len = (le32_to_cpu(desc->cmd) >> DESC_DATA_LEN_OFF) &
+ DESC_DATA_MASK;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, MAC_MAX_FRAME_SIZE,
+ DMA_FROM_DEVICE);
+
+ skb_put(skb, len);
+ if (skb->len > MAC_MAX_FRAME_SIZE) {
+ netdev_err(dev, "rcv len err, len = %d\n", skb->len);
+ dev->stats.rx_errors++;
+ dev->stats.rx_length_errors++;
+ dev_kfree_skb_any(skb);
+ goto next;
+ }
+
+ skb->protocol = eth_type_trans(skb, dev);
+ napi_gro_receive(&priv->napi, skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ dev->last_rx = jiffies;
+next:
+ pos = dma_ring_incr(pos, RX_DESC_NUM);
+ }
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + RX_BQ_RD_ADDR);
+
+ hix5hd2_rx_refill(priv);
+
+ return num;
+}
+
+static void hix5hd2_xmit_reclaim(struct net_device *dev)
+{
+ struct sk_buff *skb;
+ struct hix5hd2_desc *desc;
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ unsigned int bytes_compl = 0, pkts_compl = 0;
+ u32 start, end, num, pos, i;
+ dma_addr_t addr;
+
+ netif_tx_lock(dev);
+
+ /* software read */
+ start = dma_cnt(readl_relaxed(priv->base + TX_RQ_RD_ADDR));
+ /* logic write */
+ end = dma_cnt(readl_relaxed(priv->base + TX_RQ_WR_ADDR));
+ num = CIRC_CNT(end, start, TX_DESC_NUM);
+
+ for (i = 0, pos = start; i < num; i++) {
+ skb = priv->tx_skb[pos];
+ if (unlikely(!skb)) {
+ netdev_err(dev, "inconsistent tx_skb\n");
+ break;
+ }
+
+ pkts_compl++;
+ bytes_compl += skb->len;
+ desc = priv->tx_rq.desc + pos;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
+ priv->tx_skb[pos] = NULL;
+ dev_consume_skb_any(skb);
+ pos = dma_ring_incr(pos, TX_DESC_NUM);
+ }
+
+ if (pos != start)
+ writel_relaxed(dma_byte(pos), priv->base + TX_RQ_RD_ADDR);
+
+ netif_tx_unlock(dev);
+
+ if (pkts_compl || bytes_compl)
+ netdev_completed_queue(dev, pkts_compl, bytes_compl);
+
+ if (unlikely(netif_queue_stopped(priv->netdev)) && pkts_compl)
+ netif_wake_queue(priv->netdev);
+}
+
+static int hix5hd2_poll(struct napi_struct *napi, int budget)
+{
+ struct hix5hd2_priv *priv = container_of(napi,
+ struct hix5hd2_priv, napi);
+ struct net_device *dev = priv->netdev;
+ int work_done = 0, task = budget;
+ int ints, num;
+
+ do {
+ hix5hd2_xmit_reclaim(dev);
+ num = hix5hd2_rx(dev, task);
+ work_done += num;
+ task -= num;
+ if ((work_done >= budget) || (num == 0))
+ break;
+
+ ints = readl_relaxed(priv->base + RAW_PMU_INT);
+ writel_relaxed(ints, priv->base + RAW_PMU_INT);
+ } while (ints & DEF_INT_MASK);
+
+ if (work_done < budget) {
+ napi_complete(napi);
+ hix5hd2_irq_enable(priv);
+ }
+
+ return work_done;
+}
+
+static irqreturn_t hix5hd2_interrupt(int irq, void *dev_id)
+{
+ struct net_device *dev = (struct net_device *)dev_id;
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ int ints = readl_relaxed(priv->base + RAW_PMU_INT);
+
+ writel_relaxed(ints, priv->base + RAW_PMU_INT);
+ if (likely(ints & DEF_INT_MASK)) {
+ hix5hd2_irq_disable(priv);
+ napi_schedule(&priv->napi);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int hix5hd2_net_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ u32 pos;
+
+ /* software write pointer */
+ pos = dma_cnt(readl_relaxed(priv->base + TX_BQ_WR_ADDR));
+ if (unlikely(priv->tx_skb[pos])) {
+ dev->stats.tx_dropped++;
+ dev->stats.tx_fifo_errors++;
+ netif_stop_queue(dev);
+ return NETDEV_TX_BUSY;
+ }
+
+ addr = dma_map_single(priv->dev, skb->data, skb->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(priv->dev, addr)) {
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+
+ desc = priv->tx_bq.desc + pos;
+ desc->buff_addr = cpu_to_le32(addr);
+ priv->tx_skb[pos] = skb;
+ desc->cmd = cpu_to_le32(DESC_VLD_BUSY | DESC_FL_FULL |
+ (skb->len & DESC_DATA_MASK) << DESC_DATA_LEN_OFF |
+ (skb->len & DESC_DATA_MASK) << DESC_BUFF_LEN_OFF);
+
+ /* ensure desc updated */
+ wmb();
+
+ pos = dma_ring_incr(pos, TX_DESC_NUM);
+ writel_relaxed(dma_byte(pos), priv->base + TX_BQ_WR_ADDR);
+
+ dev->trans_start = jiffies;
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += skb->len;
+ netdev_sent_queue(dev, skb->len);
+
+ return NETDEV_TX_OK;
+}
+
+static void hix5hd2_free_dma_desc_rings(struct hix5hd2_priv *priv)
+{
+ struct hix5hd2_desc *desc;
+ dma_addr_t addr;
+ int i;
+
+ for (i = 0; i < RX_DESC_NUM; i++) {
+ struct sk_buff *skb = priv->rx_skb[i];
+ if (skb == NULL)
+ continue;
+
+ desc = priv->rx_fq.desc + i;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr,
+ MAC_MAX_FRAME_SIZE, DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ priv->rx_skb[i] = NULL;
+ }
+
+ for (i = 0; i < TX_DESC_NUM; i++) {
+ struct sk_buff *skb = priv->tx_skb[i];
+ if (skb == NULL)
+ continue;
+
+ desc = priv->tx_rq.desc + i;
+ addr = le32_to_cpu(desc->buff_addr);
+ dma_unmap_single(priv->dev, addr, skb->len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ priv->tx_skb[i] = NULL;
+ }
+}
+
+static int hix5hd2_net_open(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ netdev_err(dev, "failed to enable clk %d\n", ret);
+ return ret;
+ }
+
+ priv->phy = of_phy_connect(dev, priv->phy_node,
+ &hix5hd2_adjust_link, 0, priv->phy_mode);
+	if (!priv->phy) {
+		clk_disable_unprepare(priv->clk);
+		return -ENODEV;
+	}
+
+ phy_start(priv->phy);
+ hix5hd2_hw_init(priv);
+ hix5hd2_rx_refill(priv);
+
+ netdev_reset_queue(dev);
+ netif_start_queue(dev);
+ napi_enable(&priv->napi);
+
+ hix5hd2_port_enable(priv);
+ hix5hd2_irq_enable(priv);
+
+ return 0;
+}
+
+static int hix5hd2_net_close(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+
+ hix5hd2_port_disable(priv);
+ hix5hd2_irq_disable(priv);
+ napi_disable(&priv->napi);
+ netif_stop_queue(dev);
+ hix5hd2_free_dma_desc_rings(priv);
+
+ if (priv->phy) {
+ phy_stop(priv->phy);
+ phy_disconnect(priv->phy);
+ }
+
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static void hix5hd2_tx_timeout_task(struct work_struct *work)
+{
+ struct hix5hd2_priv *priv;
+
+ priv = container_of(work, struct hix5hd2_priv, tx_timeout_task);
+ hix5hd2_net_close(priv->netdev);
+ hix5hd2_net_open(priv->netdev);
+}
+
+static void hix5hd2_net_timeout(struct net_device *dev)
+{
+ struct hix5hd2_priv *priv = netdev_priv(dev);
+
+ schedule_work(&priv->tx_timeout_task);
+}
+
+static const struct net_device_ops hix5hd2_netdev_ops = {
+ .ndo_open = hix5hd2_net_open,
+ .ndo_stop = hix5hd2_net_close,
+ .ndo_start_xmit = hix5hd2_net_xmit,
+ .ndo_tx_timeout = hix5hd2_net_timeout,
+ .ndo_set_mac_address = hix5hd2_net_set_mac_address,
+};
+
+static int hix5hd2_get_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct hix5hd2_priv *priv = netdev_priv(net_dev);
+
+ if (!priv->phy)
+ return -ENODEV;
+
+ return phy_ethtool_gset(priv->phy, cmd);
+}
+
+static int hix5hd2_set_settings(struct net_device *net_dev,
+ struct ethtool_cmd *cmd)
+{
+ struct hix5hd2_priv *priv = netdev_priv(net_dev);
+
+ if (!priv->phy)
+ return -ENODEV;
+
+ return phy_ethtool_sset(priv->phy, cmd);
+}
+
+static const struct ethtool_ops hix5hd2_ethtools_ops = {
+ .get_link = ethtool_op_get_link,
+ .get_settings = hix5hd2_get_settings,
+ .set_settings = hix5hd2_set_settings,
+};
+
+static int hix5hd2_mdio_wait_ready(struct mii_bus *bus)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int i, timeout = 10000;
+
+ for (i = 0; readl_relaxed(base + MDIO_SINGLE_CMD) & MDIO_START; i++) {
+ if (i == timeout)
+ return -ETIMEDOUT;
+ usleep_range(10, 20);
+ }
+
+ return 0;
+}
+
+static int hix5hd2_mdio_read(struct mii_bus *bus, int phy, int reg)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int val, ret;
+
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ writel_relaxed(MDIO_READ | phy << 8 | reg, base + MDIO_SINGLE_CMD);
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ val = readl_relaxed(base + MDIO_RDATA_STATUS);
+ if (val & MDIO_R_VALID) {
+ dev_err(bus->parent, "SMI bus read not valid\n");
+ ret = -ENODEV;
+ goto out;
+ }
+
+ val = readl_relaxed(priv->base + MDIO_SINGLE_DATA);
+ ret = (val >> 16) & 0xFFFF;
+out:
+ return ret;
+}
+
+static int hix5hd2_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
+{
+ struct hix5hd2_priv *priv = bus->priv;
+ void __iomem *base = priv->base;
+ int ret;
+
+ ret = hix5hd2_mdio_wait_ready(bus);
+ if (ret < 0)
+ goto out;
+
+ writel_relaxed(val, base + MDIO_SINGLE_DATA);
+ writel_relaxed(MDIO_WRITE | phy << 8 | reg, base + MDIO_SINGLE_CMD);
+ ret = hix5hd2_mdio_wait_ready(bus);
+out:
+ return ret;
+}
+
+static void hix5hd2_destroy_hw_desc_queue(struct hix5hd2_priv *priv)
+{
+ int i;
+
+ for (i = 0; i < QUEUE_NUMS; i++) {
+ if (priv->pool[i].desc) {
+ dma_free_coherent(priv->dev, priv->pool[i].size,
+ priv->pool[i].desc,
+ priv->pool[i].phys_addr);
+ priv->pool[i].desc = NULL;
+ }
+ }
+}
+
+static int hix5hd2_init_hw_desc_queue(struct hix5hd2_priv *priv)
+{
+ struct device *dev = priv->dev;
+ struct hix5hd2_desc *virt_addr;
+ dma_addr_t phys_addr;
+ int size, i;
+
+ priv->rx_fq.count = RX_DESC_NUM;
+ priv->rx_bq.count = RX_DESC_NUM;
+ priv->tx_bq.count = TX_DESC_NUM;
+ priv->tx_rq.count = TX_DESC_NUM;
+
+ for (i = 0; i < QUEUE_NUMS; i++) {
+ size = priv->pool[i].count * sizeof(struct hix5hd2_desc);
+ virt_addr = dma_alloc_coherent(dev, size, &phys_addr,
+ GFP_KERNEL);
+ if (virt_addr == NULL)
+ goto error_free_pool;
+
+ memset(virt_addr, 0, size);
+ priv->pool[i].size = size;
+ priv->pool[i].desc = virt_addr;
+ priv->pool[i].phys_addr = phys_addr;
+ }
+ return 0;
+
+error_free_pool:
+ hix5hd2_destroy_hw_desc_queue(priv);
+
+ return -ENOMEM;
+}
+
+static int hix5hd2_dev_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *node = dev->of_node;
+ struct net_device *ndev;
+ struct hix5hd2_priv *priv;
+ struct resource *res;
+ struct mii_bus *bus;
+ const char *mac_addr;
+ int ret;
+
+ ndev = alloc_etherdev(sizeof(struct hix5hd2_priv));
+ if (!ndev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, ndev);
+
+ priv = netdev_priv(ndev);
+ priv->dev = dev;
+ priv->netdev = ndev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->base)) {
+ ret = PTR_ERR(priv->base);
+ goto out_free_netdev;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ priv->ctrl_base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(priv->ctrl_base)) {
+ ret = PTR_ERR(priv->ctrl_base);
+ goto out_free_netdev;
+ }
+
+ priv->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(priv->clk)) {
+ netdev_err(ndev, "failed to get clk\n");
+ ret = -ENODEV;
+ goto out_free_netdev;
+ }
+
+ ret = clk_prepare_enable(priv->clk);
+ if (ret < 0) {
+ netdev_err(ndev, "failed to enable clk %d\n", ret);
+ goto out_free_netdev;
+ }
+
+ bus = mdiobus_alloc();
+ if (bus == NULL) {
+ ret = -ENOMEM;
+ goto out_free_netdev;
+ }
+
+ bus->priv = priv;
+ bus->name = "hix5hd2_mii_bus";
+ bus->read = hix5hd2_mdio_read;
+ bus->write = hix5hd2_mdio_write;
+ bus->parent = &pdev->dev;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(&pdev->dev));
+ priv->bus = bus;
+
+ ret = of_mdiobus_register(bus, node);
+ if (ret)
+ goto err_free_mdio;
+
+ priv->phy_mode = of_get_phy_mode(node);
+ if (priv->phy_mode < 0) {
+		netdev_err(ndev, "cannot find phy-mode\n");
+ ret = -EINVAL;
+ goto err_mdiobus;
+ }
+
+ priv->phy_node = of_parse_phandle(node, "phy-handle", 0);
+ if (!priv->phy_node) {
+		netdev_err(ndev, "cannot find phy-handle\n");
+ ret = -EINVAL;
+ goto err_mdiobus;
+ }
+
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq <= 0) {
+ netdev_err(ndev, "No irq resource\n");
+ ret = -EINVAL;
+ goto out_phy_node;
+ }
+
+ ret = devm_request_irq(dev, ndev->irq, hix5hd2_interrupt,
+ 0, pdev->name, ndev);
+ if (ret) {
+ netdev_err(ndev, "devm_request_irq failed\n");
+ goto out_phy_node;
+ }
+
+ mac_addr = of_get_mac_address(node);
+ if (mac_addr)
+ ether_addr_copy(ndev->dev_addr, mac_addr);
+ if (!is_valid_ether_addr(ndev->dev_addr)) {
+ eth_hw_addr_random(ndev);
+ netdev_warn(ndev, "using random MAC address %pM\n",
+ ndev->dev_addr);
+ }
+
+ INIT_WORK(&priv->tx_timeout_task, hix5hd2_tx_timeout_task);
+ ndev->watchdog_timeo = 6 * HZ;
+ ndev->priv_flags |= IFF_UNICAST_FLT;
+ ndev->netdev_ops = &hix5hd2_netdev_ops;
+ ndev->ethtool_ops = &hix5hd2_ethtools_ops;
+ SET_NETDEV_DEV(ndev, dev);
+
+ ret = hix5hd2_init_hw_desc_queue(priv);
+ if (ret)
+ goto out_phy_node;
+
+ netif_napi_add(ndev, &priv->napi, hix5hd2_poll, NAPI_POLL_WEIGHT);
+ ret = register_netdev(priv->netdev);
+ if (ret) {
+		netdev_err(ndev, "register_netdev failed!\n");
+ goto out_destroy_queue;
+ }
+
+ clk_disable_unprepare(priv->clk);
+
+ return ret;
+
+out_destroy_queue:
+ netif_napi_del(&priv->napi);
+ hix5hd2_destroy_hw_desc_queue(priv);
+out_phy_node:
+ of_node_put(priv->phy_node);
+err_mdiobus:
+ mdiobus_unregister(bus);
+err_free_mdio:
+ mdiobus_free(bus);
+out_free_netdev:
+ free_netdev(ndev);
+
+ return ret;
+}
+
+static int hix5hd2_dev_remove(struct platform_device *pdev)
+{
+ struct net_device *ndev = platform_get_drvdata(pdev);
+ struct hix5hd2_priv *priv = netdev_priv(ndev);
+
+ netif_napi_del(&priv->napi);
+ unregister_netdev(ndev);
+ mdiobus_unregister(priv->bus);
+ mdiobus_free(priv->bus);
+
+ hix5hd2_destroy_hw_desc_queue(priv);
+ of_node_put(priv->phy_node);
+ cancel_work_sync(&priv->tx_timeout_task);
+ free_netdev(ndev);
+
+ return 0;
+}
+
+static const struct of_device_id hix5hd2_of_match[] = {
+ {.compatible = "hisilicon,hix5hd2-gmac",},
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, hix5hd2_of_match);
+
+static struct platform_driver hix5hd2_dev_driver = {
+ .driver = {
+ .name = "hix5hd2-gmac",
+ .of_match_table = hix5hd2_of_match,
+ },
+ .probe = hix5hd2_dev_probe,
+ .remove = hix5hd2_dev_remove,
+};
+
+module_platform_driver(hix5hd2_dev_driver);
+
+MODULE_DESCRIPTION("HISILICON HIX5HD2 Ethernet driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:hix5hd2-gmac");
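
The hix5hd2 queues above are conventional power-of-two rings: hardware exposes byte pointers, software works in 32-byte descriptor counts (hence the >>5 and <<5 helpers), and occupancy falls out of the CIRC_CNT/CIRC_SPACE idiom from <linux/circ_buf.h>. A standalone rendering with the ring size fixed (the kernel macros take it as a third argument); sizes are illustrative.

/* Build: cc -o circ circ.c */
#include <stdio.h>

#define NDESC 1024				/* power of two */

#define dma_ring_incr(n) (((n) + 1) & (NDESC - 1))
#define dma_cnt(b)  ((b) >> 5)			/* bytes -> descriptors */
#define dma_byte(c) ((c) << 5)			/* descriptors -> bytes */

/* entries ready to consume / free slots, specialized to NDESC */
#define CIRC_CNT(head, tail)   (((head) - (tail)) & (NDESC - 1))
#define CIRC_SPACE(head, tail) CIRC_CNT((tail), ((head) + 1))

int main(void)
{
	unsigned wr = dma_byte(1000);		/* hardware write pointer */
	unsigned rd = dma_byte(1020);		/* software read pointer */
	unsigned head = dma_cnt(wr), tail = dma_cnt(rd);

	printf("to consume: %u, space: %u\n",
	       CIRC_CNT(head, tail), CIRC_SPACE(head, tail));
	return 0;
}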
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
index 95837b9..85a3866 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
@@ -63,8 +63,8 @@ static int ehea_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cmd->duplex = port->full_duplex == 1 ?
DUPLEX_FULL : DUPLEX_HALF;
} else {
- speed = ~0;
- cmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ cmd->duplex = DUPLEX_UNKNOWN;
}
ethtool_cmd_speed_set(cmd, speed);
@@ -278,5 +278,5 @@ static const struct ethtool_ops ehea_ethtool_ops = {
void ehea_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ehea_ethtool_ops);
+ netdev->ethtool_ops = &ehea_ethtool_ops;
}
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 538903b..a0b418e 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -28,6 +28,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/device.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/tcp.h>
@@ -3273,7 +3274,7 @@ static int ehea_probe_adapter(struct platform_device *dev)
return -EINVAL;
}
- adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+ adapter = devm_kzalloc(&dev->dev, sizeof(*adapter), GFP_KERNEL);
if (!adapter) {
ret = -ENOMEM;
dev_err(&dev->dev, "no mem for ehea_adapter\n");
@@ -3359,7 +3360,6 @@ out_kill_eq:
out_free_ad:
list_del(&adapter->list);
- kfree(adapter);
out:
ehea_update_firmware_handles();
@@ -3386,7 +3386,6 @@ static int ehea_remove(struct platform_device *dev)
ehea_destroy_eq(adapter->neq);
ehea_remove_adapter_mr(adapter);
list_del(&adapter->list);
- kfree(adapter);
ehea_update_firmware_handles();
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
index 9b03033b..a0820f7 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_qmr.c
@@ -103,12 +103,14 @@ out_nomem:
static void hw_queue_dtor(struct hw_queue *queue)
{
- int pages_per_kpage = PAGE_SIZE / queue->pagesize;
+ int pages_per_kpage;
int i, nr_pages;
if (!queue || !queue->queue_pages)
return;
+ pages_per_kpage = PAGE_SIZE / queue->pagesize;
+
nr_pages = queue->queue_length / queue->pagesize;
for (i = 0; i < nr_pages; i += pages_per_kpage)
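
The hw_queue_dtor() fix above is the classic compute-before-validate hazard: the old initializer divided by queue->pagesize before the NULL check could run. A distilled illustration with a hypothetical struct.

#include <stdio.h>

struct queue { int pagesize; };

static int pages_per_kpage_buggy(struct queue *q)
{
	int per = 4096 / q->pagesize;	/* runs first: crashes if q == NULL */

	if (!q)
		return 0;
	return per;
}

static int pages_per_kpage_fixed(struct queue *q)
{
	if (!q)
		return 0;
	return 4096 / q->pagesize;	/* computed only after validation */
}

int main(void)
{
	struct queue q = { .pagesize = 512 };

	printf("buggy: %d, fixed: %d\n",
	       pages_per_kpage_buggy(&q), pages_per_kpage_fixed(&q));
	printf("fixed(NULL): %d\n", pages_per_kpage_fixed(NULL));
	return 0;
}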
diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c
index ae342fd..87bd953 100644
--- a/drivers/net/ethernet/ibm/emac/core.c
+++ b/drivers/net/ethernet/ibm/emac/core.c
@@ -2879,7 +2879,7 @@ static int emac_probe(struct platform_device *ofdev)
dev->commac.ops = &emac_commac_sg_ops;
} else
ndev->netdev_ops = &emac_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
+ ndev->ethtool_ops = &emac_ethtool_ops;
netif_carrier_off(ndev);
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
index 25045ae..5727779 100644
--- a/drivers/net/ethernet/icplus/ipg.c
+++ b/drivers/net/ethernet/icplus/ipg.c
@@ -2245,7 +2245,7 @@ static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
*/
dev->netdev_ops = &ipg_netdev_ops;
SET_NETDEV_DEV(dev, &pdev->dev);
- SET_ETHTOOL_OPS(dev, &ipg_ethtool_ops);
+ dev->ethtool_ops = &ipg_ethtool_ops;
rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index b56461c..9d979d7 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2854,7 +2854,7 @@ static int e100_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
netdev->hw_features |= NETIF_F_RXALL;
netdev->netdev_ops = &e100_netdev_ops;
- SET_ETHTOOL_OPS(netdev, &e100_ethtool_ops);
+ netdev->ethtool_ops = &e100_ethtool_ops;
netdev->watchdog_timeo = E100_WATCHDOG_PERIOD;
strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 73a8aee..d50f78a 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -168,8 +168,8 @@ static int e1000_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = ((hw->media_type == e1000_media_type_fiber) ||
@@ -1460,7 +1460,8 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
* enough time to complete the receives, if it's
* exceeded, break and error off
*/
- } while (good_cnt < 64 && jiffies < (time + 20));
+ } while (good_cnt < 64 && time_after(time + 20, jiffies));
+
if (good_cnt != 64) {
ret_val = 13; /* ret_val is the same as mis-compare */
break;
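
Replacing the raw jiffies comparison with time_after() keeps the loopback deadline correct across counter wrap, because the test is done on the signed difference rather than on absolute values. A standalone model of the idiom from <linux/jiffies.h>:

/* Build: cc -o ta ta.c */
#include <stdio.h>

typedef unsigned long jiffies_t;

/* true if a is after b, tolerant of wraparound */
static int time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

int main(void)
{
	jiffies_t now = (jiffies_t)-5;		/* 5 ticks before wrap */
	jiffies_t deadline = now + 20;		/* wraps past zero */

	printf("raw compare  : %s\n", now < deadline ? "in time" : "expired");
	printf("time_after() : %s\n",
	       time_after(deadline, now) ? "in time" : "expired");
	return 0;
}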
@@ -1905,5 +1906,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
void e1000_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+ netdev->ethtool_ops = &e1000_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/e1000/e1000_hw.c b/drivers/net/ethernet/intel/e1000/e1000_hw.c
index c1d3fdb..e9b07cc 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_hw.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_hw.c
@@ -4877,10 +4877,10 @@ void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats,
* since the test for a multicast frame will test positive on
* a broadcast frame.
*/
- if ((mac_addr[0] == (u8) 0xff) && (mac_addr[1] == (u8) 0xff))
+ if (is_broadcast_ether_addr(mac_addr))
/* Broadcast packet */
stats->bprc++;
- else if (*mac_addr & 0x01)
+ else if (is_multicast_ether_addr(mac_addr))
/* Multicast packet */
stats->mprc++;
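
The helpers substituted above also firm up an ordering point the old comment alludes to: every broadcast address is also multicast (the I/G bit is set), and the old check inspected only the first two octets. The helpers' logic is tiny; a standalone rendering:

#include <stdbool.h>
#include <stdio.h>

#define ETH_ALEN 6

static bool is_multicast(const unsigned char *a) { return a[0] & 0x01; }

static bool is_broadcast(const unsigned char *a)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		if (a[i] != 0xff)
			return false;	/* all six octets must be 0xff */
	return true;
}

int main(void)
{
	unsigned char bc[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
	unsigned char mc[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };

	printf("bc: mcast=%d bcast=%d\n", is_multicast(bc), is_broadcast(bc));
	printf("mc: mcast=%d bcast=%d\n", is_multicast(mc), is_broadcast(mc));
	return 0;
}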
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 27058df..660971f 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -3105,11 +3105,6 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
*/
tx_ring = adapter->tx_ring;
- if (unlikely(skb->len <= 0)) {
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
/* On PCI/PCI-X HW, if packet size is less than ETH_ZLEN,
* packets may get corrupted during padding by HW.
* To WA this issue, pad all small packets manually.
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index a5f6b11..08f22f3 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -1365,6 +1365,7 @@ static const struct e1000_mac_operations es2_mac_ops = {
.setup_led = e1000e_setup_led_generic,
.config_collision_dist = e1000e_config_collision_dist_generic,
.rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
};
static const struct e1000_phy_operations es2_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/82571.c b/drivers/net/ethernet/intel/e1000e/82571.c
index e0aa7f1..218481e 100644
--- a/drivers/net/ethernet/intel/e1000e/82571.c
+++ b/drivers/net/ethernet/intel/e1000e/82571.c
@@ -1896,6 +1896,7 @@ static const struct e1000_mac_operations e82571_mac_ops = {
.config_collision_dist = e1000e_config_collision_dist_generic,
.read_mac_addr = e1000_read_mac_addr_82571,
.rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
};
static const struct e1000_phy_operations e82_phy_ops_igp = {
diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h
index 1471c54..7785240 100644
--- a/drivers/net/ethernet/intel/e1000e/e1000.h
+++ b/drivers/net/ethernet/intel/e1000e/e1000.h
@@ -265,10 +265,10 @@ struct e1000_adapter {
u32 tx_hwtstamp_timeouts;
/* Rx */
- bool (*clean_rx) (struct e1000_ring *ring, int *work_done,
- int work_to_do) ____cacheline_aligned_in_smp;
- void (*alloc_rx_buf) (struct e1000_ring *ring, int cleaned_count,
- gfp_t gfp);
+ bool (*clean_rx)(struct e1000_ring *ring, int *work_done,
+ int work_to_do) ____cacheline_aligned_in_smp;
+ void (*alloc_rx_buf)(struct e1000_ring *ring, int cleaned_count,
+ gfp_t gfp);
struct e1000_ring *rx_ring;
u32 rx_int_delay;
@@ -391,6 +391,8 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca);
* 25MHz 46-bit 2^46 / 10^9 / 3600 = 19.55 hours
*/
#define E1000_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 60 * 4)
+#define E1000_MAX_82574_SYSTIM_REREADS 50
+#define E1000_82574_SYSTIM_EPSILON (1ULL << 35ULL)
/* hardware capability, feature, and workaround flags */
#define FLAG_HAS_AMT (1 << 0)
@@ -573,35 +575,8 @@ static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
#define er32(reg) __er32(hw, E1000_##reg)
-/**
- * __ew32_prepare - prepare to write to MAC CSR register on certain parts
- * @hw: pointer to the HW structure
- *
- * When updating the MAC CSR registers, the Manageability Engine (ME) could
- * be accessing the registers at the same time. Normally, this is handled in
- * h/w by an arbiter but on some parts there is a bug that acknowledges Host
- * accesses later than it should which could result in the register to have
- * an incorrect value. Workaround this by checking the FWSM register which
- * has bit 24 set while ME is accessing MAC CSR registers, wait if it is set
- * and try again a number of times.
- **/
-static inline s32 __ew32_prepare(struct e1000_hw *hw)
-{
- s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
-
- while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
- udelay(50);
-
- return i;
-}
-
-static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
-{
- if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
- __ew32_prepare(hw);
-
- writel(val, hw->hw_addr + reg);
-}
+s32 __ew32_prepare(struct e1000_hw *hw);
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val);
#define ew32(reg, val) __ew32(hw, E1000_##reg, (val))
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index cad250b..815e26c 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -159,8 +159,8 @@ static int e1000_get_settings(struct net_device *netdev,
ecmd->transceiver = XCVR_EXTERNAL;
}
- speed = -1;
- ecmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ ecmd->duplex = DUPLEX_UNKNOWN;
if (netif_running(netdev)) {
if (netif_carrier_ok(netdev)) {
@@ -169,6 +169,7 @@ static int e1000_get_settings(struct net_device *netdev,
}
} else if (!pm_runtime_suspended(netdev->dev.parent)) {
u32 status = er32(STATUS);
+
if (status & E1000_STATUS_LU) {
if (status & E1000_STATUS_SPEED_1000)
speed = SPEED_1000;
@@ -783,25 +784,26 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
reg + (offset << 2), val,
(test[pat] & write & mask));
*data = reg;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
int reg, u32 mask, u32 write)
{
u32 val;
+
__ew32(&adapter->hw, reg, write & mask);
val = __er32(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
e_err("set/check test failed (reg 0x%05X): got 0x%08X expected 0x%08X\n",
reg, (val & mask), (write & mask));
*data = reg;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write) \
@@ -1717,6 +1719,7 @@ static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
*data = 0;
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
+
hw->mac.serdes_has_link = false;
/* On some blade server designs, link establishment
@@ -2315,5 +2318,5 @@ static const struct ethtool_ops e1000_ethtool_ops = {
void e1000e_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+ netdev->ethtool_ops = &e1000_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h
index 6b3de5f..72f5475 100644
--- a/drivers/net/ethernet/intel/e1000e/hw.h
+++ b/drivers/net/ethernet/intel/e1000e/hw.h
@@ -469,8 +469,9 @@ struct e1000_mac_operations {
s32 (*setup_led)(struct e1000_hw *);
void (*write_vfta)(struct e1000_hw *, u32, u32);
void (*config_collision_dist)(struct e1000_hw *);
- void (*rar_set)(struct e1000_hw *, u8 *, u32);
+ int (*rar_set)(struct e1000_hw *, u8 *, u32);
s32 (*read_mac_addr)(struct e1000_hw *);
+ u32 (*rar_get_count)(struct e1000_hw *);
};
/* When to use various PHY register access functions:
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index f0bbd42..8894ab8 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -139,8 +139,9 @@ static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index);
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw);
static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force);
@@ -704,6 +705,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
mac->ops.rar_set = e1000_rar_set_pch_lpt;
mac->ops.setup_physical_interface =
e1000_setup_copper_link_pch_lpt;
+ mac->ops.rar_get_count = e1000_rar_get_count_pch_lpt;
}
/* Enable PCS Lock-loss workaround for ICH8 */
@@ -1334,6 +1336,7 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
if (((hw->mac.type == e1000_pch2lan) ||
(hw->mac.type == e1000_pch_lpt)) && link) {
u32 reg;
+
reg = er32(STATUS);
if (!(reg & (E1000_STATUS_FD | E1000_STATUS_SPEED_MASK))) {
u16 emi_addr;
@@ -1634,9 +1637,9 @@ static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
u32 fwsm;
fwsm = er32(FWSM);
- return ((fwsm & E1000_ICH_FWSM_FW_VALID) &&
+ return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
((fwsm & E1000_FWSM_MODE_MASK) ==
- (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT)));
+ (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
}
/**
@@ -1667,7 +1670,7 @@ static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
* contain the MAC address but RAR[1-6] are reserved for manageability (ME).
* Use SHRA[0-3] in place of those reserved for ME.
**/
-static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
@@ -1689,7 +1692,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
e1e_flush();
ew32(RAH(index), rar_high);
e1e_flush();
- return;
+ return 0;
}
/* RAR[1-6] are owned by manageability. Skip those and program the
@@ -1712,7 +1715,7 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
/* verify the register updates */
if ((er32(SHRAL(index - 1)) == rar_low) &&
(er32(SHRAH(index - 1)) == rar_high))
- return;
+ return 0;
e_dbg("SHRA[%d] might be locked by ME - FWSM=0x%8.8x\n",
(index - 1), er32(FWSM));
@@ -1720,6 +1723,43 @@ static void e1000_rar_set_pch2lan(struct e1000_hw *hw, u8 *addr, u32 index)
out:
e_dbg("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
+}
+
+/**
+ * e1000_rar_get_count_pch_lpt - Get the number of available SHRA
+ * @hw: pointer to the HW structure
+ *
+ * Get the number of available receive registers that the Host can
+ * program. SHRA[0-10] are the shared receive address registers
+ * that are shared between the Host and manageability engine (ME).
+ * ME can reserve any number of addresses, and the host needs to be
+ * able to tell how many registers remain available to it.
+ **/
+static u32 e1000_rar_get_count_pch_lpt(struct e1000_hw *hw)
+{
+ u32 wlock_mac;
+ u32 num_entries;
+
+ wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
+ wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+ switch (wlock_mac) {
+ case 0:
+ /* All SHRA[0..10] and RAR[0] available */
+ num_entries = hw->mac.rar_entry_count;
+ break;
+ case 1:
+ /* Only RAR[0] available */
+ num_entries = 1;
+ break;
+ default:
+ /* SHRA[0..(wlock_mac - 1)] available + RAR[0] */
+ num_entries = wlock_mac + 1;
+ break;
+ }
+
+ return num_entries;
}
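
The WLOCK_MAC field decodes as: 0 means the host owns everything, 1 means
only RAR[0], and any other value n leaves SHRA[0..n-1] plus RAR[0] to the
host. A minimal standalone sketch of that decode (illustration only, not
part of the patch):

	/* hypothetical helper mirroring the switch above */
	static u32 shra_entries_available(u32 wlock_mac, u32 rar_entry_count)
	{
		if (wlock_mac == 0)
			return rar_entry_count;	/* all SHRA[0..10] + RAR[0] */
		if (wlock_mac == 1)
			return 1;		/* only RAR[0] */
		return wlock_mac + 1;		/* SHRA[0..n-1] + RAR[0] */
	}

For example, wlock_mac == 3 yields 4 usable entries: RAR[0] and SHRA[0..2].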
/**
@@ -1733,7 +1773,7 @@ out:
* contain the MAC address. SHRA[0-10] are the shared receive address
* registers that are shared between the Host and manageability engine (ME).
**/
-static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+static int e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
u32 wlock_mac;
@@ -1755,7 +1795,7 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
e1e_flush();
ew32(RAH(index), rar_high);
e1e_flush();
- return;
+ return 0;
}
/* The manageability engine (ME) can lock certain SHRAR registers that
@@ -1787,12 +1827,13 @@ static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
/* verify the register updates */
if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
(er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
- return;
+ return 0;
}
}
out:
e_dbg("Failed to write receive address at index %d\n", index);
+ return -E1000_ERR_CONFIG;
}
/**
@@ -4976,6 +5017,7 @@ static const struct e1000_mac_operations ich8_mac_ops = {
/* id_led_init dependent on mac type */
.config_collision_dist = e1000e_config_collision_dist_generic,
.rar_set = e1000e_rar_set_generic,
+ .rar_get_count = e1000e_rar_get_count_generic,
};
static const struct e1000_phy_operations ich8_phy_ops = {
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index baa0a46..8c386f3a 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -211,6 +211,11 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
return 0;
}
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw)
+{
+ return hw->mac.rar_entry_count;
+}
+
/**
* e1000e_rar_set_generic - Set receive address register
* @hw: pointer to the HW structure
@@ -220,7 +225,7 @@ s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
* Sets the receive address array register at index to the address passed
* in by addr.
**/
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
{
u32 rar_low, rar_high;
@@ -244,6 +249,8 @@ void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
e1e_flush();
ew32(RAH(index), rar_high);
e1e_flush();
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/intel/e1000e/mac.h b/drivers/net/ethernet/intel/e1000e/mac.h
index 4e81c28..0513d90 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.h
+++ b/drivers/net/ethernet/intel/e1000e/mac.h
@@ -61,7 +61,8 @@ void e1000e_update_adaptive(struct e1000_hw *hw);
void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
-void e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+u32 e1000e_rar_get_count_generic(struct e1000_hw *hw);
+int e1000e_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
void e1000e_config_collision_dist_generic(struct e1000_hw *hw);
#endif
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 3e69386..201cc93 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -124,6 +124,36 @@ static const struct e1000_reg_info e1000_reg_info_tbl[] = {
};
/**
+ * __ew32_prepare - prepare to write to MAC CSR register on certain parts
+ * @hw: pointer to the HW structure
+ *
+ * When updating the MAC CSR registers, the Manageability Engine (ME) could
+ * be accessing the registers at the same time. Normally, this is handled in
+ * h/w by an arbiter, but some parts have a bug that acknowledges Host
+ * accesses later than it should, which can leave the register with an
+ * incorrect value. Work around this by checking the FWSM register, which
+ * has bit 24 set while ME is accessing MAC CSR registers; wait while it
+ * is set and then retry a number of times.
+ **/
+s32 __ew32_prepare(struct e1000_hw *hw)
+{
+ s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;
+
+ while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
+ udelay(50);
+
+ return i;
+}
+
+void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+ if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+ __ew32_prepare(hw);
+
+ writel(val, hw->hw_addr + reg);
+}
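
__ew32_prepare() polls FWSM bit 24 (the ME-access indicator) up to
E1000_ICH_FWSM_PCIM2PCI_COUNT times, 50 us apart, and returns the remaining
poll budget, so a return of 0 means ME never released the arbiter. A sketch
of how a caller can act on that (hypothetical helper; the
e1000e_update_rdt_wa()/e1000e_update_tdt_wa() paths below use the same idea):

	static int e1000e_write_tail_checked(struct e1000_hw *hw,
					     void __iomem *tail, u32 val)
	{
		s32 budget = __ew32_prepare(hw);

		writel(val, tail);
		return budget ? 0 : -EBUSY;	/* 0 left == workaround timed out */
	}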
+
+/**
* e1000_regdump - register printout routine
* @hw: pointer to the HW structure
* @reginfo: pointer to the register info table
@@ -599,6 +629,7 @@ static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl & ~E1000_RCTL_EN);
e_err("ME firmware caused invalid RDT - resetting\n");
schedule_work(&adapter->reset_task);
@@ -615,6 +646,7 @@ static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
u32 tctl = er32(TCTL);
+
ew32(TCTL, tctl & ~E1000_TCTL_EN);
e_err("ME firmware caused invalid TDT - resetting\n");
schedule_work(&adapter->reset_task);
@@ -1198,6 +1230,7 @@ static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
(count < tx_ring->count)) {
bool cleaned = false;
+
rmb(); /* read buffer_info after eop_desc */
for (; !cleaned; count++) {
tx_desc = E1000_TX_DESC(*tx_ring, i);
@@ -1753,6 +1786,7 @@ static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
adapter->flags & FLAG_RX_NEEDS_RESTART) {
/* disable receives */
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl & ~E1000_RCTL_EN);
adapter->flags |= FLAG_RESTART_NOW;
}
@@ -1960,6 +1994,7 @@ static void e1000_configure_msix(struct e1000_adapter *adapter)
/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
if (hw->mac.type == e1000_82574) {
u32 rfctl = er32(RFCTL);
+
rfctl |= E1000_RFCTL_ACK_DIS;
ew32(RFCTL, rfctl);
}
@@ -2204,6 +2239,7 @@ static void e1000_irq_disable(struct e1000_adapter *adapter)
if (adapter->msix_entries) {
int i;
+
for (i = 0; i < adapter->num_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
@@ -2921,6 +2957,7 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
if (adapter->flags2 & FLAG2_DMA_BURST) {
u32 txdctl = er32(TXDCTL(0));
+
txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
E1000_TXDCTL_WTHRESH);
/* set up some performance related parameters to encourage the
@@ -3239,6 +3276,7 @@ static void e1000_configure_rx(struct e1000_adapter *adapter)
if (adapter->flags & FLAG_IS_ICH) {
u32 rxdctl = er32(RXDCTL(0));
+
ew32(RXDCTL(0), rxdctl | 0x3);
}
@@ -3303,9 +3341,11 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
{
struct e1000_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- unsigned int rar_entries = hw->mac.rar_entry_count;
+ unsigned int rar_entries;
int count = 0;
+ rar_entries = hw->mac.ops.rar_get_count(hw);
+
/* save a rar entry for our hardware address */
rar_entries--;
@@ -3324,9 +3364,13 @@ static int e1000e_write_uc_addr_list(struct net_device *netdev)
* combining
*/
netdev_for_each_uc_addr(ha, netdev) {
+ int rval;
+
if (!rar_entries)
break;
- hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+ rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
+ if (rval < 0)
+ return -ENOMEM;
count++;
}
}
@@ -4085,12 +4129,37 @@ static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
cc);
struct e1000_hw *hw = &adapter->hw;
- cycle_t systim;
+ cycle_t systim, systim_next;
/* latch SYSTIMH on read of SYSTIML */
systim = (cycle_t)er32(SYSTIML);
systim |= (cycle_t)er32(SYSTIMH) << 32;
+ if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
+ u64 incvalue, time_delta, rem, temp;
+ int i;
+
+ /* errata for 82574/82583: possible bad bits read from SYSTIMH/L;
+ * check that the time is incrementing at a reasonable rate and
+ * is a multiple of incvalue
+ */
+ incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
+ for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
+ /* latch SYSTIMH on read of SYSTIML */
+ systim_next = (cycle_t)er32(SYSTIML);
+ systim_next |= (cycle_t)er32(SYSTIMH) << 32;
+
+ time_delta = systim_next - systim;
+ temp = time_delta;
+ rem = do_div(temp, incvalue);
+
+ systim = systim_next;
+
+ if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
+ (rem == 0))
+ break;
+ }
+ }
return systim;
}
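
The errata loop accepts a SYSTIM sample only if two consecutive reads differ
by less than E1000_82574_SYSTIM_EPSILON and by an exact multiple of the
TIMINCA increment; a flipped bit from a bad read almost never satisfies the
multiple-of-incvalue test, so the loop rereads. A minimal sketch of the
acceptance predicate, assuming the same semantics as the loop above:

	/* illustration only: the check inside the reread loop */
	static bool systim_delta_sane(u64 time_delta, u32 incvalue, u64 epsilon)
	{
		u64 tmp = time_delta;

		return (time_delta < epsilon) && (do_div(tmp, incvalue) == 0);
	}

For example, with incvalue 24, a delta of 96 passes (96 % 24 == 0); a delta
of 97 forces another reread.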
@@ -4491,7 +4560,7 @@ static void e1000e_update_phy_task(struct work_struct *work)
e1000_get_phy_info(hw);
/* Enable EEE on 82579 after link up */
- if (hw->phy.type == e1000_phy_82579)
+ if (hw->phy.type >= e1000_phy_82579)
e1000_set_eee_pchlan(hw);
}
@@ -4695,6 +4764,7 @@ static void e1000e_update_stats(struct e1000_adapter *adapter)
/* Correctable ECC Errors */
if (hw->mac.type == e1000_pch_lpt) {
u32 pbeccsts = er32(PBECCSTS);
+
adapter->corr_errors +=
pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
adapter->uncorr_errors +=
@@ -4808,6 +4878,7 @@ static void e1000e_enable_receives(struct e1000_adapter *adapter)
(adapter->flags & FLAG_RESTART_NOW)) {
struct e1000_hw *hw = &adapter->hw;
u32 rctl = er32(RCTL);
+
ew32(RCTL, rctl | E1000_RCTL_EN);
adapter->flags &= ~FLAG_RESTART_NOW;
}
@@ -4930,6 +5001,7 @@ static void e1000_watchdog_task(struct work_struct *work)
if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
!txb2b) {
u32 tarc0;
+
tarc0 = er32(TARC(0));
tarc0 &= ~SPEED_MODE_BIT;
ew32(TARC(0), tarc0);
@@ -5170,7 +5242,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
__be16 protocol;
if (skb->ip_summed != CHECKSUM_PARTIAL)
- return 0;
+ return false;
if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
@@ -5215,7 +5287,7 @@ static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb)
i = 0;
tx_ring->next_to_use = i;
- return 1;
+ return true;
}
static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
@@ -6209,6 +6281,7 @@ static int __e1000_resume(struct pci_dev *pdev)
e1e_wphy(&adapter->hw, BM_WUS, ~0);
} else {
u32 wus = er32(WUS);
+
if (wus) {
e_info("MAC Wakeup cause - %s\n",
wus & E1000_WUS_EX ? "Unicast Packet" :
@@ -7027,7 +7100,7 @@ static const struct pci_error_handlers e1000_err_handler = {
.resume = e1000_io_resume,
};
-static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
+static const struct pci_device_id e1000_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
@@ -7144,6 +7217,7 @@ static struct pci_driver e1000_driver = {
static int __init e1000_init_module(void)
{
int ret;
+
pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
e1000e_driver_version);
pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c
index a9a976f..b1f212b 100644
--- a/drivers/net/ethernet/intel/e1000e/nvm.c
+++ b/drivers/net/ethernet/intel/e1000e/nvm.c
@@ -398,6 +398,7 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
+
word_out = (word_out >> 8) | (word_out << 8);
e1000_shift_out_eec_bits(hw, word_out, 16);
widx++;
diff --git a/drivers/net/ethernet/intel/e1000e/param.c b/drivers/net/ethernet/intel/e1000e/param.c
index d0ac0f3..aa1923f 100644
--- a/drivers/net/ethernet/intel/e1000e/param.c
+++ b/drivers/net/ethernet/intel/e1000e/param.c
@@ -436,6 +436,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_IntMode > bd) {
unsigned int int_mode = IntMode[bd];
+
e1000_validate_option(&int_mode, &opt, adapter);
adapter->int_mode = int_mode;
} else {
@@ -457,6 +458,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_SmartPowerDownEnable > bd) {
unsigned int spd = SmartPowerDownEnable[bd];
+
e1000_validate_option(&spd, &opt, adapter);
if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && spd)
adapter->flags |= FLAG_SMART_POWER_DOWN;
@@ -473,6 +475,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_CrcStripping > bd) {
unsigned int crc_stripping = CrcStripping[bd];
+
e1000_validate_option(&crc_stripping, &opt, adapter);
if (crc_stripping == OPTION_ENABLED) {
adapter->flags2 |= FLAG2_CRC_STRIPPING;
@@ -495,6 +498,7 @@ void e1000e_check_options(struct e1000_adapter *adapter)
if (num_KumeranLockLoss > bd) {
unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+
e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
enabled = kmrn_lock_loss;
}
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 00b3fc9..b2005e1 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2896,6 +2896,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
(hw->phy.addr == 2) &&
!(MAX_PHY_REG_ADDRESS & reg) && (data & (1 << 11))) {
u16 data2 = 0x7EFF;
+
ret_val = e1000_access_phy_debug_regs_hv(hw,
(1 << 6) | 0x3,
&data2, false);
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index beb7b43..6598584 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -72,6 +72,7 @@
#define I40E_MIN_NUM_DESCRIPTORS 64
#define I40E_MIN_MSIX 2
#define I40E_DEFAULT_NUM_VMDQ_VSI 8 /* max 256 VSIs */
+#define I40E_MIN_VSI_ALLOC 51 /* LAN, ATR, FCOE, 32 VF, 16 VMDQ */
#define I40E_DEFAULT_QUEUES_PER_VMDQ 2 /* max 16 qps */
#define I40E_DEFAULT_QUEUES_PER_VF 4
#define I40E_DEFAULT_QUEUES_PER_TC 1 /* should be a power of 2 */
@@ -97,10 +98,6 @@
#define STRINGIFY(foo) #foo
#define XSTRINGIFY(bar) STRINGIFY(bar)
-#ifndef ARCH_HAS_PREFETCH
-#define prefetch(X)
-#endif
-
#define I40E_RX_DESC(R, i) \
((ring_is_16byte_desc_enabled(R)) \
? (union i40e_32byte_rx_desc *) \
@@ -157,11 +154,23 @@ struct i40e_lump_tracking {
#define I40E_FDIR_BUFFER_FULL_MARGIN 10
#define I40E_FDIR_BUFFER_HEAD_ROOM 200
+enum i40e_fd_stat_idx {
+ I40E_FD_STAT_ATR,
+ I40E_FD_STAT_SB,
+ I40E_FD_STAT_PF_COUNT
+};
+#define I40E_FD_STAT_PF_IDX(pf_id) ((pf_id) * I40E_FD_STAT_PF_COUNT)
+#define I40E_FD_ATR_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_ATR)
+#define I40E_FD_SB_STAT_IDX(pf_id) \
+ (I40E_FD_STAT_PF_IDX(pf_id) + I40E_FD_STAT_SB)
+
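The flow director statistics indices are packed in per-PF pairs, ATR first
and sideband (SB) second. Illustration only, using compile-time checks on
the macros above:

	BUILD_BUG_ON(I40E_FD_ATR_STAT_IDX(3) != 6);	/* 3 * 2 + 0 */
	BUILD_BUG_ON(I40E_FD_SB_STAT_IDX(3) != 7);	/* 3 * 2 + 1 */
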
struct i40e_fdir_filter {
struct hlist_node fdir_node;
/* filter input set */
u8 flow_type;
u8 ip4_proto;
+ /* TX packet view of src and dst */
__be32 dst_ip[4];
__be32 src_ip[4];
__be16 src_port;
@@ -205,7 +214,6 @@ struct i40e_pf {
unsigned long state;
unsigned long link_check_timeout;
struct msix_entry *msix_entries;
- u16 num_msix_entries;
bool fc_autoneg_status;
u16 eeprom_version;
@@ -220,11 +228,14 @@ struct i40e_pf {
u16 rss_size; /* num queues in the RSS array */
u16 rss_size_max; /* HW defined max RSS queues */
u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
+ u16 num_alloc_vsi; /* num VSIs this driver supports */
u8 atr_sample_rate;
bool wol_en;
struct hlist_head fdir_filter_list;
u16 fdir_pf_active_filters;
+ u16 fd_sb_cnt_idx;
+ u16 fd_atr_cnt_idx;
#ifdef CONFIG_I40E_VXLAN
__be16 vxlan_ports[I40E_MAX_PF_UDP_OFFLOAD_PORTS];
@@ -266,6 +277,7 @@ struct i40e_pf {
#ifdef CONFIG_I40E_VXLAN
#define I40E_FLAG_VXLAN_FILTER_SYNC (u64)(1 << 27)
#endif
+#define I40E_FLAG_DCB_CAPABLE (u64)(1 << 29)
/* tracks features that get auto disabled by errors */
u64 auto_disable_flags;
@@ -300,7 +312,6 @@ struct i40e_pf {
u16 pf_seid;
u16 main_vsi_seid;
u16 mac_seid;
- struct i40e_aqc_get_switch_config_data *sw_config;
struct kobject *switch_kobj;
#ifdef CONFIG_DEBUG_FS
struct dentry *i40e_dbg_pf;
@@ -329,9 +340,7 @@ struct i40e_pf {
struct ptp_clock *ptp_clock;
struct ptp_clock_info ptp_caps;
struct sk_buff *ptp_tx_skb;
- struct work_struct ptp_tx_work;
struct hwtstamp_config tstamp_config;
- unsigned long ptp_tx_start;
unsigned long last_rx_ptp_check;
spinlock_t tmreg_lock; /* Used to protect the device time registers. */
u64 ptp_base_adj;
@@ -420,6 +429,7 @@ struct i40e_vsi {
struct i40e_q_vector **q_vectors;
int num_q_vectors;
int base_vector;
+ bool irqs_ready;
u16 seid; /* HW index of this VSI (absolute index) */
u16 id; /* VSI number */
@@ -540,6 +550,15 @@ static inline bool i40e_rx_is_programming_status(u64 qw)
(qw >> I40E_RX_PROG_STATUS_DESC_LENGTH_SHIFT);
}
+/**
+ * i40e_get_fd_cnt_all - get the total FD filter space available
+ * @pf: pointer to the pf struct
+ **/
+static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf)
+{
+ return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count;
+}
+
/* needed by i40e_ethtool.c */
int i40e_up(struct i40e_vsi *vsi);
void i40e_down(struct i40e_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index ed3902b..7a02749 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -33,6 +33,16 @@
static void i40e_resume_aq(struct i40e_hw *hw);
/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+ (desc->opcode == i40e_aqc_opc_nvm_update);
+}
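
The new nvm_busy flag gates the admin queue to one outstanding NVM
erase/update at a time. Pulling the three hunks in this file together, the
protocol is (sketch, condensed from the code below):

	/* in i40e_asq_send_command(): reject while one op is in flight */
	if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy)
		return I40E_ERR_NVM;
	/* ... after posting the descriptor: mark busy */
	if (i40e_is_nvm_update_op(desc))
		hw->aq.nvm_busy = true;
	/* in i40e_clean_arq_element(): clear when the completion arrives */
	if (i40e_is_nvm_update_op(&e->desc))
		hw->aq.nvm_busy = false;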
+
+/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -281,8 +291,11 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
*
* Configure base address and length registers for the transmit queue
**/
-static void i40e_config_asq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
wr32(hw, I40E_VF_ATQBAH1,
@@ -291,6 +304,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ATQBAL1);
} else {
/* configure the transmit queue */
wr32(hw, I40E_PF_ATQBAH,
@@ -299,7 +313,14 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ATQBAL);
}
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
/**
@@ -308,8 +329,11 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
*
* Configure base address and length registers for the receive (event queue)
**/
-static void i40e_config_arq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
wr32(hw, I40E_VF_ARQBAH1,
@@ -318,6 +342,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ARQBAL1);
} else {
/* configure the receive queue */
wr32(hw, I40E_PF_ARQBAH,
@@ -326,10 +351,17 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ARQBAL);
}
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
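
Both i40e_config_asq_regs() and i40e_config_arq_regs() now use the same
write-then-read-back pattern: program the base/length registers, read one of
them back, and fail queue init when the readback disagrees with what was
written. A generic sketch of the pattern (hypothetical helper, not in the
patch):

	static bool i40e_reg_write_verified(struct i40e_hw *hw, u32 reg, u32 val)
	{
		wr32(hw, reg, val);
		return rd32(hw, reg) == val;
	}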
/**
@@ -377,7 +409,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_asq_regs(hw);
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -434,7 +468,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_arq_regs(hw);
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -577,14 +613,14 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
- if (hw->aq.api_maj_ver != I40E_FW_API_VERSION_MAJOR ||
- hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
+ if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
}
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
+ hw->aq.nvm_busy = false;
ret_code = i40e_aq_set_hmc_resource_profile(hw,
I40E_HMC_PROFILE_DEFAULT,
@@ -708,6 +744,12 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -835,6 +877,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
+ if (i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = true;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -929,6 +974,9 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
e->msg_size);
}
+ if (i40e_is_nvm_update_op(&e->desc))
+ hw->aq.nvm_busy = false;
+
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
index 993f768..b1552fb 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h
@@ -90,6 +90,7 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 7b6374a..15f289f 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -34,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
struct i40e_aq_desc {
__le16 flags;
@@ -123,6 +123,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_version = 0x0001,
i40e_aqc_opc_driver_version = 0x0002,
i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
/* resource ownership */
i40e_aqc_opc_request_resource = 0x0008,
@@ -182,9 +183,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- i40e_aqc_opc_set_storm_control_config = 0x0280,
- i40e_aqc_opc_get_storm_control_config = 0x0281,
-
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -207,6 +205,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -224,13 +223,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_partner_advt = 0x0616,
i40e_aqc_opc_set_lb_modes = 0x0618,
i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
/* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
/* virtualization commands */
i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -272,8 +273,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
- i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
i40e_aqc_opc_debug_modify_internals = 0xFF09,
@@ -341,6 +340,14 @@ struct i40e_aqc_queue_shutdown {
I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
@@ -1289,27 +1296,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
- __le32 broadcast_threshold;
- __le32 multicast_threshold;
- __le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
-#define I40E_AQC_STORM_CONTROL_MDICW 0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
-#define I40E_AQC_STORM_CONTROL_BDICW 0x08
-#define I40E_AQC_STORM_CONTROL_BIDU 0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
- I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
/* DCB 0x03xx */
/* PFC Ignore (direct 0x0301)
@@ -1427,11 +1413,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved[4];
u8 tc_valid_bits;
- u8 reserved1;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
u8 tc_strict_priority_flags;
- u8 reserved2[17];
+ u8 reserved1[17];
u8 tc_bw_share_credits[8];
- u8 reserved3[96];
+ u8 reserved2[96];
};
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1499,6 +1486,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1539,6 +1535,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_XLPPI = 0x9,
I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1549,7 +1547,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
- I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
I40E_PHY_TYPE_MAX
};
@@ -1583,11 +1584,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
-#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
-#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
@@ -1696,6 +1694,7 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_ACTIVE 0x00
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
@@ -1747,14 +1746,21 @@ struct i40e_aqc_set_lb_mode {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
-/* Set PHY Reset command (0x0622) */
-struct i40e_aqc_set_phy_reset {
- u8 reset_flags;
-#define I40E_AQ_PHY_RESET_REQUEST 0x02
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
u8 reserved[15];
};
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
enum i40e_aq_phy_reg_type {
I40E_AQC_PHY_REG_INTERNAL = 0x1,
@@ -1779,6 +1785,47 @@ struct i40e_aqc_nvm_update {
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define ANVM_READ_SINGLE_FEATURE 0
+#define ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+ __le16 instance_id;
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
+ __le16 field_id;
+ __le16 instance_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c
index 922cdcc..6e65f19 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_common.c
@@ -43,12 +43,10 @@ static i40e_status i40e_set_mac_type(struct i40e_hw *hw)
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
case I40E_DEV_ID_SFP_XL710:
- case I40E_DEV_ID_SFP_X710:
case I40E_DEV_ID_QEMU:
case I40E_DEV_ID_KX_A:
case I40E_DEV_ID_KX_B:
case I40E_DEV_ID_KX_C:
- case I40E_DEV_ID_KX_D:
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
@@ -133,7 +131,11 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
**/
bool i40e_check_asq_alive(struct i40e_hw *hw)
{
- return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+ else
+ return false;
}
/**
@@ -653,6 +655,36 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
}
/**
+ * i40e_pre_tx_queue_cfg - pre-configure a Tx queue
+ * @hw: pointer to the HW structure
+ * @queue: target pf queue index
+ * @enable: state change request
+ *
+ * Handles the HW requirement to signal the intention to enable
+ * or disable the target queue before actually doing so.
+ **/
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
+{
+ u32 abs_queue_idx = hw->func_caps.base_queue + queue;
+ u32 reg_block = 0;
+ u32 reg_val;
+
+ if (abs_queue_idx >= 128)
+ reg_block = abs_queue_idx / 128;
+
+ reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+ reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+ reg_val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+
+ if (enable)
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK;
+ else
+ reg_val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+ wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
+}
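
The TXPRE_QDIS registers are banked in groups of 128 queues, so the block
index is the absolute queue index divided by 128. Illustration only:

	u32 abs_queue_idx = 300;		/* hypothetical queue */
	u32 reg_block = abs_queue_idx / 128;	/* == 2 -> I40E_GLLAN_TXPRE_QDIS(2) */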
+
+/**
* i40e_get_media_type - Gets media type
* @hw: pointer to the hardware structure
**/
@@ -699,7 +731,7 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
}
#define I40E_PF_RESET_WAIT_COUNT_A0 200
-#define I40E_PF_RESET_WAIT_COUNT 10
+#define I40E_PF_RESET_WAIT_COUNT 100
/**
* i40e_pf_reset - Reset the PF
* @hw: pointer to the hardware structure
@@ -789,6 +821,9 @@ void i40e_clear_pxe_mode(struct i40e_hw *hw)
{
u32 reg;
+ if (i40e_check_asq_alive(hw))
+ i40e_aq_clear_pxe_mode(hw, NULL);
+
/* Clear single descriptor fetch/write-back mode */
reg = rd32(hw, I40E_GLLAN_RCTL_0);
@@ -907,6 +942,33 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
/* Admin command wrappers */
/**
+ * i40e_aq_clear_pxe_mode
+ * @hw: pointer to the hw struct
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Tell the firmware that the driver is taking over from PXE
+ **/
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ i40e_status status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_clear_pxe *cmd =
+ (struct i40e_aqc_clear_pxe *)&desc.params.raw;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_clear_pxe_mode);
+
+ cmd->rx_cnt = 0x2;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ wr32(hw, I40E_GLLAN_RCTL_0, 0x1);
+
+ return status;
+}
+
+/**
* i40e_aq_set_link_restart_an
* @hw: pointer to the hw struct
* @cmd_details: pointer to command details structure or NULL
@@ -975,6 +1037,13 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
hw_link_info->an_info = resp->an_info;
hw_link_info->ext_info = resp->ext_info;
hw_link_info->loopback = resp->loopback;
+ hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
+ hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
+
+ if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
+ hw_link_info->crc_enable = true;
+ else
+ hw_link_info->crc_enable = false;
if (resp->command_flags & cpu_to_le16(I40E_AQ_LSE_ENABLE))
hw_link_info->lse_enable = true;
@@ -1021,8 +1090,6 @@ i40e_status i40e_aq_add_vsi(struct i40e_hw *hw,
cmd->vsi_flags = cpu_to_le16(vsi_ctx->flags);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cmd_details);
@@ -1163,8 +1230,6 @@ i40e_status i40e_aq_get_vsi_params(struct i40e_hw *hw,
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
- if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), NULL);
@@ -1203,8 +1268,6 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
cmd->uplink_seid = cpu_to_le16(vsi_ctx->seid);
desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (sizeof(vsi_ctx->info) > I40E_AQ_LARGE_BUF)
- desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
sizeof(vsi_ctx->info), cmd_details);
@@ -1300,6 +1363,7 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
struct i40e_aqc_driver_version *cmd =
(struct i40e_aqc_driver_version *)&desc.params.raw;
i40e_status status;
+ u16 len;
if (dv == NULL)
return I40E_ERR_PARAM;
@@ -1311,7 +1375,14 @@ i40e_status i40e_aq_send_driver_version(struct i40e_hw *hw,
cmd->driver_minor_ver = dv->minor_version;
cmd->driver_build_ver = dv->build_version;
cmd->driver_subbuild_ver = dv->subbuild_version;
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ /* attach only the printable-ASCII prefix of the driver string */
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ (dv->driver_string[len] < 0x80) &&
+ dv->driver_string[len])
+ len++;
+ status = i40e_asq_send_command(hw, &desc, dv->driver_string,
+ len, cmd_details);
return status;
}
@@ -1900,6 +1971,12 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
}
}
+ /* Software override ensuring FCoE is disabled when the device is
+ * in NPAR or MFP mode, because FCoE is not supported in those modes.
+ */
+ if (p->npar_enable || p->mfp_mode_1)
+ p->fcoe = false;
+
/* additional HW specific goodies that might
* someday be HW version specific
*/
@@ -2094,8 +2171,8 @@ i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
* @cmd_details: pointer to command details structure or NULL
**/
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 header_len,
- u8 protocol_index, u8 *filter_index,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -2253,6 +2330,35 @@ static i40e_status i40e_aq_tx_sched_cmd(struct i40e_hw *hw, u16 seid,
}
/**
+ * i40e_aq_config_vsi_bw_limit - Configure VSI BW Limit
+ * @hw: pointer to the hw struct
+ * @seid: VSI seid
+ * @credit: BW limit credits (0 = disabled)
+ * @max_credit: Max BW limit credits
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_configure_vsi_bw_limit *cmd =
+ (struct i40e_aqc_configure_vsi_bw_limit *)&desc.params.raw;
+ i40e_status status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_configure_vsi_bw_limit);
+
+ cmd->vsi_seid = cpu_to_le16(seid);
+ cmd->credit = cpu_to_le16(credit);
+ cmd->max_credit = max_credit;
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_config_vsi_tc_bw - Config VSI BW Allocation per TC
* @hw: pointer to the hw struct
* @seid: VSI seid
@@ -2405,7 +2511,7 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
{
u32 fcoe_cntx_size, fcoe_filt_size;
u32 pe_cntx_size, pe_filt_size;
- u32 fcoe_fmax, pe_fmax;
+ u32 fcoe_fmax;
u32 val;
/* Validate FCoE settings passed */
@@ -2480,13 +2586,6 @@ static i40e_status i40e_validate_filter_settings(struct i40e_hw *hw,
if (fcoe_filt_size + fcoe_cntx_size > fcoe_fmax)
return I40E_ERR_INVALID_SIZE;
- /* PEHSIZE + PEDSIZE should not be greater than PMPEXFMAX */
- val = rd32(hw, I40E_GLHMC_PEXFMAX);
- pe_fmax = (val & I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK)
- >> I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT;
- if (pe_filt_size + pe_cntx_size > pe_fmax)
- return I40E_ERR_INVALID_SIZE;
-
return 0;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
index 6e8103a..00bc0cd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_dcb_nl.c
@@ -232,7 +232,7 @@ static void i40e_dcbnl_del_app(struct i40e_pf *pf,
struct i40e_ieee_app_priority_table *app)
{
int v, err;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] && pf->vsi[v]->netdev) {
err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app);
if (err)
@@ -302,8 +302,8 @@ void i40e_dcbnl_setup(struct i40e_vsi *vsi)
struct net_device *dev = vsi->netdev;
struct i40e_pf *pf = i40e_netdev_to_pf(dev);
- /* DCB not enabled */
- if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
+ /* Not DCB capable */
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
return;
/* Do not setup DCB NL ops for MFP mode */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 3c37386..cffdfc2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -45,7 +45,7 @@ static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid)
if (seid < 0)
dev_info(&pf->pdev->dev, "%d: bad seid\n", seid);
else
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && (pf->vsi[i]->seid == seid))
return pf->vsi[i];
@@ -843,7 +843,7 @@ static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf)
{
int i;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n",
i, pf->vsi[i]->seid);
@@ -862,12 +862,11 @@ static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf,
" rx_bytes = \t%lld \trx_unicast = \t\t%lld \trx_multicast = \t%lld\n",
estats->rx_bytes, estats->rx_unicast, estats->rx_multicast);
dev_info(&pf->pdev->dev,
- " rx_broadcast = \t%lld \trx_discards = \t\t%lld \trx_errors = \t%lld\n",
- estats->rx_broadcast, estats->rx_discards, estats->rx_errors);
+ " rx_broadcast = \t%lld \trx_discards = \t\t%lld\n",
+ estats->rx_broadcast, estats->rx_discards);
dev_info(&pf->pdev->dev,
- " rx_missed = \t%lld \trx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
- estats->rx_missed, estats->rx_unknown_protocol,
- estats->tx_bytes);
+ " rx_unknown_protocol = \t%lld \ttx_bytes = \t%lld\n",
+ estats->rx_unknown_protocol, estats->tx_bytes);
dev_info(&pf->pdev->dev,
" tx_unicast = \t%lld \ttx_multicast = \t\t%lld \ttx_broadcast = \t%lld\n",
estats->tx_unicast, estats->tx_multicast, estats->tx_broadcast);
@@ -1527,7 +1526,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
cnt = sscanf(&cmd_buf[15], "%i", &vsi_seid);
if (cnt == 0) {
int i;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
i40e_vsi_reset_stats(pf->vsi[i]);
dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n");
} else if (cnt == 1) {
@@ -1744,10 +1743,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
} else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
- } else if (strncmp(cmd_buf, "fd-sb off", 9) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, false);
- } else if (strncmp(cmd_buf, "fd-sb on", 8) == 0) {
- i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_SB_ENABLED, true);
} else if (strncmp(cmd_buf, "lldp", 4) == 0) {
if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
int ret;
@@ -1967,8 +1962,6 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
dev_info(&pf->pdev->dev, " fd-atr off\n");
dev_info(&pf->pdev->dev, " fd-atr on\n");
- dev_info(&pf->pdev->dev, " fd-sb off\n");
- dev_info(&pf->pdev->dev, " fd-sb on\n");
dev_info(&pf->pdev->dev, " lldp start\n");
dev_info(&pf->pdev->dev, " lldp stop\n");
dev_info(&pf->pdev->dev, " lldp get local\n");
diff --git a/drivers/net/ethernet/intel/i40e/i40e_diag.c b/drivers/net/ethernet/intel/i40e/i40e_diag.c
index b2380da..56438bd 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_diag.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_diag.c
@@ -67,17 +67,25 @@ static i40e_status i40e_diag_reg_pattern_test(struct i40e_hw *hw,
struct i40e_diag_reg_test_info i40e_reg_list[] = {
/* offset mask elements stride */
- {I40E_QTX_CTL(0), 0x0000FFBF, 4, I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
- {I40E_PFINT_ITR0(0), 0x00000FFF, 3, I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
- {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
- {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
- {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 8, I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
- {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
- {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
- {I40E_PFINT_LNKLSTN(0), 0x000007FF, 64, I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
- {I40E_QINT_TQCTL(0), 0x000000FF, 64, I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
- {I40E_QINT_RQCTL(0), 0x000000FF, 64, I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
- {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
+ {I40E_QTX_CTL(0), 0x0000FFBF, 1,
+ I40E_QTX_CTL(1) - I40E_QTX_CTL(0)},
+ {I40E_PFINT_ITR0(0), 0x00000FFF, 3,
+ I40E_PFINT_ITR0(1) - I40E_PFINT_ITR0(0)},
+ {I40E_PFINT_ITRN(0, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(0, 1) - I40E_PFINT_ITRN(0, 0)},
+ {I40E_PFINT_ITRN(1, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(1, 1) - I40E_PFINT_ITRN(1, 0)},
+ {I40E_PFINT_ITRN(2, 0), 0x00000FFF, 1,
+ I40E_PFINT_ITRN(2, 1) - I40E_PFINT_ITRN(2, 0)},
+ {I40E_PFINT_STAT_CTL0, 0x0000000C, 1, 0},
+ {I40E_PFINT_LNKLST0, 0x00001FFF, 1, 0},
+ {I40E_PFINT_LNKLSTN(0), 0x000007FF, 1,
+ I40E_PFINT_LNKLSTN(1) - I40E_PFINT_LNKLSTN(0)},
+ {I40E_QINT_TQCTL(0), 0x000000FF, 1,
+ I40E_QINT_TQCTL(1) - I40E_QINT_TQCTL(0)},
+ {I40E_QINT_RQCTL(0), 0x000000FF, 1,
+ I40E_QINT_RQCTL(1) - I40E_QINT_RQCTL(0)},
+ {I40E_PFINT_ICR0_ENA, 0xF7F20000, 1, 0},
{ 0 }
};
@@ -93,9 +101,25 @@ i40e_status i40e_diag_reg_test(struct i40e_hw *hw)
u32 reg, mask;
u32 i, j;
- for (i = 0; (i40e_reg_list[i].offset != 0) && !ret_code; i++) {
+ for (i = 0; i40e_reg_list[i].offset != 0 &&
+ !ret_code; i++) {
+
+ /* set actual reg range for dynamically allocated resources */
+ if (i40e_reg_list[i].offset == I40E_QTX_CTL(0) &&
+ hw->func_caps.num_tx_qp != 0)
+ i40e_reg_list[i].elements = hw->func_caps.num_tx_qp;
+ if ((i40e_reg_list[i].offset == I40E_PFINT_ITRN(0, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(1, 0) ||
+ i40e_reg_list[i].offset == I40E_PFINT_ITRN(2, 0) ||
+ i40e_reg_list[i].offset == I40E_QINT_TQCTL(0) ||
+ i40e_reg_list[i].offset == I40E_QINT_RQCTL(0)) &&
+ hw->func_caps.num_msix_vectors != 0)
+ i40e_reg_list[i].elements =
+ hw->func_caps.num_msix_vectors - 1;
+
+ /* test register access */
mask = i40e_reg_list[i].mask;
- for (j = 0; (j < i40e_reg_list[i].elements) && !ret_code; j++) {
+ for (j = 0; j < i40e_reg_list[i].elements && !ret_code; j++) {
reg = i40e_reg_list[i].offset +
(j * i40e_reg_list[i].stride);
ret_code = i40e_diag_reg_pattern_test(hw, reg, mask);
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
index 03d99cb..4a488ff 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
@@ -46,6 +46,8 @@ struct i40e_stats {
I40E_STAT(struct i40e_pf, _name, _stat)
#define I40E_VSI_STAT(_name, _stat) \
I40E_STAT(struct i40e_vsi, _name, _stat)
+#define I40E_VEB_STAT(_name, _stat) \
+ I40E_STAT(struct i40e_veb, _name, _stat)
static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(rx_packets),
@@ -56,12 +58,36 @@ static const struct i40e_stats i40e_gstrings_net_stats[] = {
I40E_NETDEV_STAT(tx_errors),
I40E_NETDEV_STAT(rx_dropped),
I40E_NETDEV_STAT(tx_dropped),
- I40E_NETDEV_STAT(multicast),
I40E_NETDEV_STAT(collisions),
I40E_NETDEV_STAT(rx_length_errors),
I40E_NETDEV_STAT(rx_crc_errors),
};
+static const struct i40e_stats i40e_gstrings_veb_stats[] = {
+ I40E_VEB_STAT("rx_bytes", stats.rx_bytes),
+ I40E_VEB_STAT("tx_bytes", stats.tx_bytes),
+ I40E_VEB_STAT("rx_unicast", stats.rx_unicast),
+ I40E_VEB_STAT("tx_unicast", stats.tx_unicast),
+ I40E_VEB_STAT("rx_multicast", stats.rx_multicast),
+ I40E_VEB_STAT("tx_multicast", stats.tx_multicast),
+ I40E_VEB_STAT("rx_broadcast", stats.rx_broadcast),
+ I40E_VEB_STAT("tx_broadcast", stats.tx_broadcast),
+ I40E_VEB_STAT("rx_discards", stats.rx_discards),
+ I40E_VEB_STAT("tx_discards", stats.tx_discards),
+ I40E_VEB_STAT("tx_errors", stats.tx_errors),
+ I40E_VEB_STAT("rx_unknown_protocol", stats.rx_unknown_protocol),
+};
+
+static const struct i40e_stats i40e_gstrings_misc_stats[] = {
+ I40E_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
+ I40E_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
+ I40E_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
+ I40E_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
+ I40E_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
+ I40E_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
+ I40E_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
+};
+
static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
struct ethtool_rxnfc *cmd);
@@ -78,7 +104,12 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_bytes", stats.eth.rx_bytes),
I40E_PF_STAT("tx_bytes", stats.eth.tx_bytes),
- I40E_PF_STAT("rx_errors", stats.eth.rx_errors),
+ I40E_PF_STAT("rx_unicast", stats.eth.rx_unicast),
+ I40E_PF_STAT("tx_unicast", stats.eth.tx_unicast),
+ I40E_PF_STAT("rx_multicast", stats.eth.rx_multicast),
+ I40E_PF_STAT("tx_multicast", stats.eth.tx_multicast),
+ I40E_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
+ I40E_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
I40E_PF_STAT("tx_errors", stats.eth.tx_errors),
I40E_PF_STAT("rx_dropped", stats.eth.rx_discards),
I40E_PF_STAT("tx_dropped", stats.eth.tx_discards),
@@ -88,6 +119,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("mac_local_faults", stats.mac_local_faults),
I40E_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
I40E_PF_STAT("tx_timeout", tx_timeout_count),
+ I40E_PF_STAT("rx_csum_bad", hw_csum_rx_error),
I40E_PF_STAT("rx_length_errors", stats.rx_length_errors),
I40E_PF_STAT("link_xon_rx", stats.link_xon_rx),
I40E_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
@@ -112,8 +144,10 @@ static struct i40e_stats i40e_gstrings_stats[] = {
I40E_PF_STAT("rx_oversize", stats.rx_oversize),
I40E_PF_STAT("rx_jabber", stats.rx_jabber),
I40E_PF_STAT("VF_admin_queue_requests", vf_aq_requests),
- I40E_PF_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts),
I40E_PF_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared),
+ I40E_PF_STAT("fdir_atr_match", stats.fd_atr_match),
+ I40E_PF_STAT("fdir_sb_match", stats.fd_sb_match),
+
/* LPI stats */
I40E_PF_STAT("tx_lpi_status", stats.tx_lpi_status),
I40E_PF_STAT("rx_lpi_status", stats.rx_lpi_status),
@@ -122,11 +156,14 @@ static struct i40e_stats i40e_gstrings_stats[] = {
};
#define I40E_QUEUE_STATS_LEN(n) \
- ((((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs + \
- ((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs) * 2)
+ (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
+ * 2 /* Tx and Rx together */ \
+ * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
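
The old form of I40E_QUEUE_STATS_LEN() hard-coded four u64s per queue pair;
the new form derives the count from the struct, so it stays correct if
i40e_queue_stats grows. Worked example, assuming i40e_queue_stats currently
holds two u64 counters (packets and bytes):

	/* 8 queue pairs * 2 rings (Tx + Rx) * 2 counters = 32 stat slots */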
#define I40E_GLOBAL_STATS_LEN ARRAY_SIZE(i40e_gstrings_stats)
#define I40E_NETDEV_STATS_LEN ARRAY_SIZE(i40e_gstrings_net_stats)
+#define I40E_MISC_STATS_LEN ARRAY_SIZE(i40e_gstrings_misc_stats)
#define I40E_VSI_STATS_LEN(n) (I40E_NETDEV_STATS_LEN + \
+ I40E_MISC_STATS_LEN + \
I40E_QUEUE_STATS_LEN((n)))
#define I40E_PFC_STATS_LEN ( \
(FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
@@ -135,6 +172,7 @@ static struct i40e_stats i40e_gstrings_stats[] = {
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_tx) + \
FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_2_xoff)) \
/ sizeof(u64))
+#define I40E_VEB_STATS_LEN ARRAY_SIZE(i40e_gstrings_veb_stats)
#define I40E_PF_STATS_LEN(n) (I40E_GLOBAL_STATS_LEN + \
I40E_PFC_STATS_LEN + \
I40E_VSI_STATS_LEN((n)))
@@ -620,10 +658,15 @@ static int i40e_get_sset_count(struct net_device *netdev, int sset)
case ETH_SS_TEST:
return I40E_TEST_LEN;
case ETH_SS_STATS:
- if (vsi == pf->vsi[pf->lan_vsi])
- return I40E_PF_STATS_LEN(netdev);
- else
+ if (vsi == pf->vsi[pf->lan_vsi]) {
+ int len = I40E_PF_STATS_LEN(netdev);
+
+ if (pf->lan_veb != I40E_NO_VEB)
+ len += I40E_VEB_STATS_LEN;
+ return len;
+ } else {
return I40E_VSI_STATS_LEN(netdev);
+ }
default:
return -EOPNOTSUPP;
}
@@ -633,6 +676,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
struct ethtool_stats *stats, u64 *data)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct i40e_pf *pf = vsi->back;
int i = 0;
@@ -648,10 +692,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i++] = (i40e_gstrings_net_stats[j].sizeof_stat ==
sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
+ for (j = 0; j < I40E_MISC_STATS_LEN; j++) {
+ p = (char *)vsi + i40e_gstrings_misc_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
rcu_read_lock();
- for (j = 0; j < vsi->num_queue_pairs; j++, i += 4) {
- struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
- struct i40e_ring *rx_ring;
+ for (j = 0; j < vsi->num_queue_pairs; j++) {
+ tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
if (!tx_ring)
continue;
@@ -662,33 +710,45 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
data[i] = tx_ring->stats.packets;
data[i + 1] = tx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));
+ i += 2;
/* Rx ring is the 2nd half of the queue pair */
rx_ring = &tx_ring[1];
do {
start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
- data[i + 2] = rx_ring->stats.packets;
- data[i + 3] = rx_ring->stats.bytes;
+ data[i] = rx_ring->stats.packets;
+ data[i + 1] = rx_ring->stats.bytes;
} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+ i += 2;
}
rcu_read_unlock();
- if (vsi == pf->vsi[pf->lan_vsi]) {
- for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
- p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
- data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
- }
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
- data[i++] = pf->stats.priority_xon_tx[j];
- data[i++] = pf->stats.priority_xoff_tx[j];
- }
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
- data[i++] = pf->stats.priority_xon_rx[j];
- data[i++] = pf->stats.priority_xoff_rx[j];
+ if (vsi != pf->vsi[pf->lan_vsi])
+ return;
+
+ if (pf->lan_veb != I40E_NO_VEB) {
+ struct i40e_veb *veb = pf->veb[pf->lan_veb];
+ for (j = 0; j < I40E_VEB_STATS_LEN; j++) {
+ p = (char *)veb;
+ p += i40e_gstrings_veb_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_veb_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
- for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
- data[i++] = pf->stats.priority_xon_2_xoff[j];
}
+ for (j = 0; j < I40E_GLOBAL_STATS_LEN; j++) {
+ p = (char *)pf + i40e_gstrings_stats[j].stat_offset;
+ data[i++] = (i40e_gstrings_stats[j].sizeof_stat ==
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_tx[j];
+ data[i++] = pf->stats.priority_xoff_tx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++) {
+ data[i++] = pf->stats.priority_xon_rx[j];
+ data[i++] = pf->stats.priority_xoff_rx[j];
+ }
+ for (j = 0; j < I40E_MAX_USER_PRIORITY; j++)
+ data[i++] = pf->stats.priority_xon_2_xoff[j];
}
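
The per-queue loop above snapshots packets and bytes under the u64_stats seqcount protocol: the reader records a sequence number, copies the counters, and retries if the number changed underneath it. A simplified single-threaded illustration of that retry pattern (the real kernel primitives additionally insert memory barriers and handle 32-bit SMP):

#include <stdio.h>

struct ring_stats {
	unsigned int seq;		/* even: stable, odd: write in progress */
	unsigned long long packets;
	unsigned long long bytes;
};

static unsigned int fetch_begin(const struct ring_stats *s)
{
	return s->seq;			/* kernel version adds a read barrier */
}

static int fetch_retry(const struct ring_stats *s, unsigned int start)
{
	/* retry if a write was in flight or completed during the read */
	return (start & 1) || s->seq != start;
}

int main(void)
{
	struct ring_stats ring = { .seq = 2, .packets = 100, .bytes = 6400 };
	unsigned long long packets, bytes;
	unsigned int start;

	do {
		start = fetch_begin(&ring);
		packets = ring.packets;
		bytes = ring.bytes;
	} while (fetch_retry(&ring, start));

	printf("packets=%llu bytes=%llu\n", packets, bytes);
	return 0;
}
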
static void i40e_get_strings(struct net_device *netdev, u32 stringset,
@@ -713,6 +773,11 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
i40e_gstrings_net_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
+ for (i = 0; i < I40E_MISC_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "%s",
+ i40e_gstrings_misc_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
for (i = 0; i < vsi->num_queue_pairs; i++) {
snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
p += ETH_GSTRING_LEN;
@@ -723,34 +788,42 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
snprintf(p, ETH_GSTRING_LEN, "rx-%u.rx_bytes", i);
p += ETH_GSTRING_LEN;
}
- if (vsi == pf->vsi[pf->lan_vsi]) {
- for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
- snprintf(p, ETH_GSTRING_LEN, "port.%s",
- i40e_gstrings_stats[i].stat_string);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xon", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "port.tx_priority_%u_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon", i);
- p += ETH_GSTRING_LEN;
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xoff", i);
- p += ETH_GSTRING_LEN;
- }
- for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
- snprintf(p, ETH_GSTRING_LEN,
- "port.rx_priority_%u_xon_2_xoff", i);
+ if (vsi != pf->vsi[pf->lan_vsi])
+ return;
+
+ if (pf->lan_veb != I40E_NO_VEB) {
+ for (i = 0; i < I40E_VEB_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "veb.%s",
+ i40e_gstrings_veb_stats[i].stat_string);
p += ETH_GSTRING_LEN;
}
}
+ for (i = 0; i < I40E_GLOBAL_STATS_LEN; i++) {
+ snprintf(p, ETH_GSTRING_LEN, "port.%s",
+ i40e_gstrings_stats[i].stat_string);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.tx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon", i);
+ p += ETH_GSTRING_LEN;
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
+ for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
+ snprintf(p, ETH_GSTRING_LEN,
+ "port.rx_priority_%u_xon_2_xoff", i);
+ p += ETH_GSTRING_LEN;
+ }
/* BUG_ON(p - data != I40E_STATS_LEN * ETH_GSTRING_LEN); */
break;
}
@@ -1007,14 +1080,13 @@ static int i40e_get_coalesce(struct net_device *netdev,
ec->rx_max_coalesced_frames_irq = vsi->work_limit;
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
- ec->rx_coalesce_usecs = 1;
- else
- ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+ ec->use_adaptive_rx_coalesce = 1;
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
- ec->tx_coalesce_usecs = 1;
- else
- ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+ ec->use_adaptive_tx_coalesce = 1;
+
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
return 0;
}
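
The reworked get_coalesce can report the adaptive flag and the interval independently because both now live in one field: the microsecond value sits in the low bits and a dynamic bit is OR'd on top. A sketch of that encoding, using an assumed flag value rather than the hardware definition:

#include <stdio.h>

#define ITR_DYNAMIC 0x8000u	/* assumed flag bit, not the hw definition */

int main(void)
{
	/* interval in usecs with the adaptive flag OR'd on top */
	unsigned int itr_setting = 62 | ITR_DYNAMIC;

	unsigned int usecs    = itr_setting & ~ITR_DYNAMIC;
	unsigned int adaptive = !!(itr_setting & ITR_DYNAMIC);

	printf("usecs=%u adaptive=%u\n", usecs, adaptive);	/* 62 1 */
	return 0;
}
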
@@ -1033,37 +1105,27 @@ static int i40e_set_coalesce(struct net_device *netdev,
if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
vsi->work_limit = ec->tx_max_coalesced_frames_irq;
- switch (ec->rx_coalesce_usecs) {
- case 0:
- vsi->rx_itr_setting = 0;
- break;
- case 1:
- vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
- break;
- default:
- if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
- return -EINVAL;
+ if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
vsi->rx_itr_setting = ec->rx_coalesce_usecs;
- break;
- }
+ else
+ return -EINVAL;
- switch (ec->tx_coalesce_usecs) {
- case 0:
- vsi->tx_itr_setting = 0;
- break;
- case 1:
- vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
- ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
- break;
- default:
- if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
- return -EINVAL;
+ if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
vsi->tx_itr_setting = ec->tx_coalesce_usecs;
- break;
- }
+ else
+ return -EINVAL;
+
+ if (ec->use_adaptive_rx_coalesce)
+ vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
vector = vsi->base_vector;
for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
@@ -1140,8 +1202,7 @@ static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf,
int cnt = 0;
/* report total rule count */
- cmd->data = pf->hw.fdir_shared_filter_count +
- pf->fdir_pf_filter_count;
+ cmd->data = i40e_get_fd_cnt_all(pf);
hlist_for_each_entry_safe(rule, node2,
&pf->fdir_filter_list, fdir_node) {
@@ -1175,10 +1236,6 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
struct i40e_fdir_filter *rule = NULL;
struct hlist_node *node2;
- /* report total rule count */
- cmd->data = pf->hw.fdir_shared_filter_count +
- pf->fdir_pf_filter_count;
-
hlist_for_each_entry_safe(rule, node2,
&pf->fdir_filter_list, fdir_node) {
if (fsp->location <= rule->fd_id)
@@ -1189,11 +1246,24 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf,
return -EINVAL;
fsp->flow_type = rule->flow_type;
- fsp->h_u.tcp_ip4_spec.psrc = rule->src_port;
- fsp->h_u.tcp_ip4_spec.pdst = rule->dst_port;
- fsp->h_u.tcp_ip4_spec.ip4src = rule->src_ip[0];
- fsp->h_u.tcp_ip4_spec.ip4dst = rule->dst_ip[0];
- fsp->ring_cookie = rule->q_index;
+ if (fsp->flow_type == IP_USER_FLOW) {
+ fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
+ fsp->h_u.usr_ip4_spec.proto = 0;
+ fsp->m_u.usr_ip4_spec.proto = 0;
+ }
+
+ /* Reverse the src and dest notion, since the HW views them from
+ * Tx perspective, whereas the user expects them in the Rx filter view.
+ */
+ fsp->h_u.tcp_ip4_spec.psrc = rule->dst_port;
+ fsp->h_u.tcp_ip4_spec.pdst = rule->src_port;
+ fsp->h_u.tcp_ip4_spec.ip4src = rule->dst_ip[0];
+ fsp->h_u.tcp_ip4_spec.ip4dst = rule->src_ip[0];
+
+ if (rule->dest_ctl == I40E_FILTER_PROGRAM_DESC_DEST_DROP_PACKET)
+ fsp->ring_cookie = RX_CLS_FLOW_DISC;
+ else
+ fsp->ring_cookie = rule->q_index;
return 0;
}
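
Both flow-director hunks apply the perspective swap described in the comment: the hardware programs these filters from the Tx point of view while ethtool users think in Rx terms, so source and destination are exchanged on the way in and swapped back on the way out. A toy demonstration that the swap is its own inverse:

#include <stdio.h>

struct flow {
	unsigned short src_port;
	unsigned short dst_port;
};

/* exchanging src and dst converts between the two perspectives */
static struct flow swap_view(struct flow f)
{
	struct flow out = { .src_port = f.dst_port, .dst_port = f.src_port };
	return out;
}

int main(void)
{
	struct flow user_rx = { .src_port = 1234, .dst_port = 80 };
	struct flow hw_tx = swap_view(user_rx);	/* what gets programmed */
	struct flow back = swap_view(hw_tx);	/* what ethtool reports */

	printf("hw: src=%u dst=%u  reported: src=%u dst=%u\n",
	       hw_tx.src_port, hw_tx.dst_port, back.src_port, back.dst_port);
	return 0;
}
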
@@ -1223,6 +1293,8 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
break;
case ETHTOOL_GRXCLSRLCNT:
cmd->rule_cnt = pf->fdir_pf_active_filters;
+ /* report total rule count */
+ cmd->data = i40e_get_fd_cnt_all(pf);
ret = 0;
break;
case ETHTOOL_GRXCLSRULE:
@@ -1291,16 +1363,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V4_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &=
- ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |=
- (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
break;
default:
return -EINVAL;
@@ -1309,16 +1377,12 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
case UDP_V6_FLOW:
switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
case 0:
- hena &=
- ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
- hena |=
- (((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
break;
default:
return -EINVAL;
@@ -1503,7 +1567,8 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
return -EINVAL;
}
- if (fsp->ring_cookie >= vsi->num_queue_pairs)
+ if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
+ (fsp->ring_cookie >= vsi->num_queue_pairs))
return -EINVAL;
input = kzalloc(sizeof(*input), GFP_KERNEL);
@@ -1524,13 +1589,17 @@ static int i40e_add_fdir_ethtool(struct i40e_vsi *vsi,
input->pctype = 0;
input->dest_vsi = vsi->id;
input->fd_status = I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID;
- input->cnt_index = 0;
+ input->cnt_index = pf->fd_sb_cnt_idx;
input->flow_type = fsp->flow_type;
input->ip4_proto = fsp->h_u.usr_ip4_spec.proto;
- input->src_port = fsp->h_u.tcp_ip4_spec.psrc;
- input->dst_port = fsp->h_u.tcp_ip4_spec.pdst;
- input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
- input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
+
+ /* Reverse the src and dest notion, since the HW expects them to be in
+ * Tx perspective, whereas the user's input is from the Rx filter view.
+ */
+ input->dst_port = fsp->h_u.tcp_ip4_spec.psrc;
+ input->src_port = fsp->h_u.tcp_ip4_spec.pdst;
+ input->dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
+ input->src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
ret = i40e_add_del_fdir(vsi, input, true);
if (ret)
@@ -1692,5 +1761,5 @@ static const struct ethtool_ops i40e_ethtool_ops = {
void i40e_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &i40e_ethtool_ops);
+ netdev->ethtool_ops = &i40e_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
index bf2d4cc..9b987cc 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.c
@@ -201,7 +201,7 @@ exit:
**/
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf)
+ u32 idx)
{
i40e_status ret_code = 0;
struct i40e_hmc_pd_entry *pd_entry;
@@ -237,10 +237,7 @@ i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
pd_addr = (u64 *)pd_table->pd_page_addr.va;
pd_addr += rel_pd_idx;
memset(pd_addr, 0, sizeof(u64));
- if (is_pf)
- I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
- else
- I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx, hmc_info->hmc_fn_id);
+ I40E_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
/* free memory here */
ret_code = i40e_free_dma_mem(hw, &(pd_entry->bp.addr));
diff --git a/drivers/net/ethernet/intel/i40e/i40e_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
index 0cd4701..b45d8fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_hmc.h
@@ -163,11 +163,6 @@ struct i40e_hmc_info {
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
- wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
- (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
- ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
/**
* I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
@@ -226,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
u32 pd_index);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
+ u32 idx);
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
index d5d98fe..870ab1e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
@@ -397,7 +397,7 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
/* remove the backing pages from pd_idx1 to i */
while (i && (i > pd_idx1)) {
i40e_remove_pd_bp(hw, info->hmc_info,
- (i - 1), true);
+ (i - 1));
i--;
}
}
@@ -433,11 +433,7 @@ exit_sd_error:
((j - 1) * I40E_HMC_MAX_BP_COUNT));
pd_lmt1 = min(pd_lmt, (j * I40E_HMC_MAX_BP_COUNT));
for (i = pd_idx1; i < pd_lmt1; i++) {
- i40e_remove_pd_bp(
- hw,
- info->hmc_info,
- i,
- true);
+ i40e_remove_pd_bp(hw, info->hmc_info, i);
}
i40e_remove_pd_page(hw, info->hmc_info, (j - 1));
break;
@@ -616,8 +612,7 @@ static i40e_status i40e_delete_lan_hmc_object(struct i40e_hw *hw,
pd_table =
&info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
if (pd_table->pd_entry[rel_pd_idx].valid) {
- ret_code = i40e_remove_pd_bp(hw, info->hmc_info,
- j, true);
+ ret_code = i40e_remove_pd_bp(hw, info->hmc_info, j);
if (ret_code)
goto exit;
}
@@ -747,6 +742,7 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphdata_ena), 1, 195 },
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, tphhead_ena), 1, 196 },
{ I40E_HMC_STORE(i40e_hmc_obj_rxq, lrxqthresh), 3, 198 },
+ { I40E_HMC_STORE(i40e_hmc_obj_rxq, prefena), 1, 201 },
{ 0 }
};
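
The rxq context table describes each field by its struct offset, bit width, and LSB position in the hardware context, and a generic packer walks the table; the new prefena entry simply claims bit 201. A rough userspace sketch of that descriptor-table technique, with only two illustrative entries and a naive bit-by-bit packer:

#include <stdio.h>
#include <stddef.h>

struct rxq_ctx {			/* two fields of the CPU-side struct */
	unsigned char lrxqthresh;
	unsigned char prefena;
};

struct ctx_ele {
	size_t offset;			/* offsetof() into struct rxq_ctx */
	unsigned int width;		/* field width in bits */
	unsigned int lsb;		/* bit position in the packed context */
};

static const struct ctx_ele rxq_ce[] = {
	{ offsetof(struct rxq_ctx, lrxqthresh), 3, 198 },
	{ offsetof(struct rxq_ctx, prefena),    1, 201 },
};

static void pack_ctx(unsigned char *dst, const struct rxq_ctx *src)
{
	size_t i;
	unsigned int bit;

	for (i = 0; i < sizeof(rxq_ce) / sizeof(rxq_ce[0]); i++) {
		unsigned char val =
			*((const unsigned char *)src + rxq_ce[i].offset);

		for (bit = 0; bit < rxq_ce[i].width; bit++)
			if (val & (1u << bit)) {
				unsigned int pos = rxq_ce[i].lsb + bit;

				dst[pos / 8] |= 1u << (pos % 8);
			}
	}
}

int main(void)
{
	unsigned char ctx[32] = { 0 };
	struct rxq_ctx rxq = { .lrxqthresh = 5, .prefena = 1 };

	pack_ctx(ctx, &rxq);
	printf("ctx[24]=0x%02x ctx[25]=0x%02x\n", ctx[24], ctx[25]);
	return 0;
}
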
diff --git a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
index 341de92..eb65fe2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
@@ -56,6 +56,7 @@ struct i40e_hmc_obj_rxq {
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 2e72449..275ca9a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,8 +38,8 @@ static const char i40e_driver_string[] =
#define DRV_KERN "-k"
#define DRV_VERSION_MAJOR 0
-#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 36
+#define DRV_VERSION_MINOR 4
+#define DRV_VERSION_BUILD 10
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -67,12 +67,10 @@ static int i40e_veb_get_bw_info(struct i40e_veb *veb);
*/
static DEFINE_PCI_DEVICE_TABLE(i40e_pci_tbl) = {
{PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X710), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
- {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_D), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
{PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
@@ -356,6 +354,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
@@ -368,7 +367,6 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
- struct i40e_ring *tx_ring, *rx_ring;
u64 bytes, packets;
unsigned int start;
@@ -397,7 +395,7 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
}
rcu_read_unlock();
- /* following stats updated by ixgbe_watchdog_task() */
+ /* following stats updated by i40e_watchdog_subtask() */
stats->multicast = vsi_stats->multicast;
stats->tx_errors = vsi_stats->tx_errors;
stats->tx_dropped = vsi_stats->tx_dropped;
@@ -530,6 +528,12 @@ void i40e_update_eth_stats(struct i40e_vsi *vsi)
i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
vsi->stat_offsets_loaded,
&oes->rx_discards, &es->rx_discards);
+ i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
+ i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_errors, &es->tx_errors);
i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
I40E_GLV_GORCL(stat_idx),
@@ -648,10 +652,10 @@ static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
return;
/* Clear the __I40E_HANG_CHECK_ARMED bit for all Tx rings */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
- if (!vsi)
+ if (!vsi || !vsi->tx_rings[0])
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -702,10 +706,10 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
}
/* Clear the __I40E_HANG_CHECK_ARMED bit for Tx rings */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
- if (!vsi)
+ if (!vsi || !vsi->tx_rings[0])
continue;
for (i = 0; i < vsi->num_queue_pairs; i++) {
@@ -720,19 +724,18 @@ static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
}
/**
- * i40e_update_stats - Update the board statistics counters.
+ * i40e_update_vsi_stats - Update the VSI statistics counters.
* @vsi: the VSI to be updated
*
* There are a few instances where we store the same stat in a
* couple of different structs. This is partly because we have
* the netdev stats that need to be filled out, which is slightly
* different from the "eth_stats" defined by the chip and used in
- * VF communications. We sort it all out here in a central place.
+ * VF communications. We sort it out here.
**/
-void i40e_update_stats(struct i40e_vsi *vsi)
+static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
struct i40e_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
struct rtnl_link_stats64 *ons;
struct rtnl_link_stats64 *ns; /* netdev stats */
struct i40e_eth_stats *oes;
@@ -741,8 +744,6 @@ void i40e_update_stats(struct i40e_vsi *vsi)
u32 rx_page, rx_buf;
u64 rx_p, rx_b;
u64 tx_p, tx_b;
- u32 val;
- int i;
u16 q;
if (test_bit(__I40E_DOWN, &vsi->state) ||
@@ -804,196 +805,256 @@ void i40e_update_stats(struct i40e_vsi *vsi)
ns->tx_packets = tx_p;
ns->tx_bytes = tx_b;
- i40e_update_eth_stats(vsi);
/* update netdev stats from eth stats */
- ons->rx_errors = oes->rx_errors;
- ns->rx_errors = es->rx_errors;
+ i40e_update_eth_stats(vsi);
ons->tx_errors = oes->tx_errors;
ns->tx_errors = es->tx_errors;
ons->multicast = oes->rx_multicast;
ns->multicast = es->rx_multicast;
+ ons->rx_dropped = oes->rx_discards;
+ ns->rx_dropped = es->rx_discards;
ons->tx_dropped = oes->tx_discards;
ns->tx_dropped = es->tx_discards;
- /* Get the port data only if this is the main PF VSI */
+ /* pull in a couple of PF stats if this is the main VSI */
if (vsi == pf->vsi[pf->lan_vsi]) {
- struct i40e_hw_port_stats *nsd = &pf->stats;
- struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ ns->rx_crc_errors = pf->stats.crc_errors;
+ ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
+ ns->rx_length_errors = pf->stats.rx_length_errors;
+ }
+}
- i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
- I40E_GLPRT_GORCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
- i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
- I40E_GLPRT_GOTCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
- i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_discards,
- &nsd->eth.rx_discards);
- i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_discards,
- &nsd->eth.tx_discards);
- i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
- I40E_GLPRT_MPRCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_multicast,
- &nsd->eth.rx_multicast);
+/**
+ * i40e_update_pf_stats - Update the PF statistics counters.
+ * @pf: the PF to be updated
+ **/
+static void i40e_update_pf_stats(struct i40e_pf *pf)
+{
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw *hw = &pf->hw;
+ u32 val;
+ int i;
- i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_dropped_link_down,
- &nsd->tx_dropped_link_down);
+ i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+ i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+ i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_discards,
+ &nsd->eth.rx_discards);
+ i40e_stat_update32(hw, I40E_GLPRT_TDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_discards,
+ &nsd->eth.tx_discards);
- i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
- pf->stat_offsets_loaded,
- &osd->crc_errors, &nsd->crc_errors);
- ns->rx_crc_errors = nsd->crc_errors;
+ i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
+ I40E_GLPRT_UPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_unicast,
+ &nsd->eth.rx_unicast);
+ i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_multicast,
+ &nsd->eth.rx_multicast);
+ i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
+ I40E_GLPRT_BPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_broadcast,
+ &nsd->eth.rx_broadcast);
+ i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
+ I40E_GLPRT_UPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_unicast,
+ &nsd->eth.tx_unicast);
+ i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
+ I40E_GLPRT_MPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_multicast,
+ &nsd->eth.tx_multicast);
+ i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
+ I40E_GLPRT_BPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_broadcast,
+ &nsd->eth.tx_broadcast);
- i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
- pf->stat_offsets_loaded,
- &osd->illegal_bytes, &nsd->illegal_bytes);
- ns->rx_errors = nsd->crc_errors
- + nsd->illegal_bytes;
+ i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_dropped_link_down,
+ &nsd->tx_dropped_link_down);
- i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->mac_local_faults,
- &nsd->mac_local_faults);
- i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->mac_remote_faults,
- &nsd->mac_remote_faults);
+ i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->crc_errors, &nsd->crc_errors);
- i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_length_errors,
- &nsd->rx_length_errors);
- ns->rx_length_errors = nsd->rx_length_errors;
+ i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->illegal_bytes, &nsd->illegal_bytes);
- i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xon_rx, &nsd->link_xon_rx);
- i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xon_tx, &nsd->link_xon_tx);
- i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
- i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xoff_tx, &nsd->link_xoff_tx);
-
- for (i = 0; i < 8; i++) {
- i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_rx[i],
- &nsd->priority_xon_rx[i]);
- i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_tx[i],
- &nsd->priority_xon_tx[i]);
- i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xoff_tx[i],
- &nsd->priority_xoff_tx[i]);
- i40e_stat_update32(hw,
- I40E_GLPRT_RXON2OFFCNT(hw->port, i),
- pf->stat_offsets_loaded,
- &osd->priority_xon_2_xoff[i],
- &nsd->priority_xon_2_xoff[i]);
- }
+ i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_local_faults,
+ &nsd->mac_local_faults);
+ i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_remote_faults,
+ &nsd->mac_remote_faults);
- i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
- I40E_GLPRT_PRC64L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_64, &nsd->rx_size_64);
- i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
- I40E_GLPRT_PRC127L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_127, &nsd->rx_size_127);
- i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
- I40E_GLPRT_PRC255L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_255, &nsd->rx_size_255);
- i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
- I40E_GLPRT_PRC511L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_511, &nsd->rx_size_511);
- i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
- I40E_GLPRT_PRC1023L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_1023, &nsd->rx_size_1023);
- i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
- I40E_GLPRT_PRC1522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_1522, &nsd->rx_size_1522);
- i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
- I40E_GLPRT_PRC9522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_big, &nsd->rx_size_big);
+ i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_length_errors,
+ &nsd->rx_length_errors);
- i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
- I40E_GLPRT_PTC64L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_64, &nsd->tx_size_64);
- i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
- I40E_GLPRT_PTC127L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_127, &nsd->tx_size_127);
- i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
- I40E_GLPRT_PTC255L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_255, &nsd->tx_size_255);
- i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
- I40E_GLPRT_PTC511L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_511, &nsd->tx_size_511);
- i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
- I40E_GLPRT_PTC1023L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_1023, &nsd->tx_size_1023);
- i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
- I40E_GLPRT_PTC1522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_1522, &nsd->tx_size_1522);
- i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
- I40E_GLPRT_PTC9522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_big, &nsd->tx_size_big);
+ i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_rx, &nsd->link_xon_rx);
+ i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_tx, &nsd->link_xon_tx);
+ i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */
+ i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_tx, &nsd->link_xoff_tx);
- i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_undersize, &nsd->rx_undersize);
- i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ for (i = 0; i < 8; i++) {
+ i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
pf->stat_offsets_loaded,
- &osd->rx_fragments, &nsd->rx_fragments);
- i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ &osd->priority_xon_rx[i],
+ &nsd->priority_xon_rx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
pf->stat_offsets_loaded,
- &osd->rx_oversize, &nsd->rx_oversize);
- i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ &osd->priority_xon_tx[i],
+ &nsd->priority_xon_tx[i]);
+ i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
pf->stat_offsets_loaded,
- &osd->rx_jabber, &nsd->rx_jabber);
-
- val = rd32(hw, I40E_PRTPM_EEE_STAT);
- nsd->tx_lpi_status =
- (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
- I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
- nsd->rx_lpi_status =
- (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
- I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
- i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
+ &osd->priority_xoff_tx[i],
+ &nsd->priority_xoff_tx[i]);
+ i40e_stat_update32(hw,
+ I40E_GLPRT_RXON2OFFCNT(hw->port, i),
pf->stat_offsets_loaded,
- &osd->tx_lpi_count, &nsd->tx_lpi_count);
- i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
- pf->stat_offsets_loaded,
- &osd->rx_lpi_count, &nsd->rx_lpi_count);
+ &osd->priority_xon_2_xoff[i],
+ &nsd->priority_xon_2_xoff[i]);
}
+ i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_64, &nsd->rx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_127, &nsd->rx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_255, &nsd->rx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_511, &nsd->rx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1023, &nsd->rx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1522, &nsd->rx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_big, &nsd->rx_size_big);
+
+ i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_64, &nsd->tx_size_64);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_127, &nsd->tx_size_127);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_255, &nsd->tx_size_255);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_511, &nsd->tx_size_511);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1023, &nsd->tx_size_1023);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1522, &nsd->tx_size_1522);
+ i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_big, &nsd->tx_size_big);
+
+ i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_undersize, &nsd->rx_undersize);
+ i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_fragments, &nsd->rx_fragments);
+ i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_oversize, &nsd->rx_oversize);
+ i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_jabber, &nsd->rx_jabber);
+
+ /* FDIR stats */
+ i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx),
+ pf->stat_offsets_loaded,
+ &osd->fd_atr_match, &nsd->fd_atr_match);
+ i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx),
+ pf->stat_offsets_loaded,
+ &osd->fd_sb_match, &nsd->fd_sb_match);
+
+ val = rd32(hw, I40E_PRTPM_EEE_STAT);
+ nsd->tx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
+ nsd->rx_lpi_status =
+ (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
+ I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
+ i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
+ pf->stat_offsets_loaded,
+ &osd->tx_lpi_count, &nsd->tx_lpi_count);
+ i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
+ pf->stat_offsets_loaded,
+ &osd->rx_lpi_count, &nsd->rx_lpi_count);
+
pf->stat_offsets_loaded = true;
}
/**
+ * i40e_update_stats - Update the various statistics counters.
+ * @vsi: the VSI to be updated
+ *
+ * Update the various stats for this VSI and its related entities.
+ **/
+void i40e_update_stats(struct i40e_vsi *vsi)
+{
+ struct i40e_pf *pf = vsi->back;
+
+ if (vsi == pf->vsi[pf->lan_vsi])
+ i40e_update_pf_stats(pf);
+
+ i40e_update_vsi_stats(vsi);
+}
+
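The i40e_stat_update48()/i40e_stat_update32() helpers used throughout the new i40e_update_pf_stats() handle counters that the hardware never clears: a baseline is captured the first time through, and later readings are reported relative to it, correcting for wrap at the counter width. A sketch of the 48-bit variant's logic (simplified; the register read is replaced by a plain argument):

#include <stdio.h>

typedef unsigned long long u64;

static void stat_update48(u64 new_data, int offset_loaded,
			  u64 *offset, u64 *stat)
{
	if (!offset_loaded)
		*offset = new_data;	/* capture the baseline once */
	if (new_data >= *offset)
		*stat = new_data - *offset;
	else				/* the 48-bit counter wrapped */
		*stat = (new_data + (1ULL << 48)) - *offset;
}

int main(void)
{
	u64 offset = 0, stat = 0;

	stat_update48(100, 0, &offset, &stat);	/* baseline: stat = 0 */
	stat_update48(250, 1, &offset, &stat);	/* stat = 150 */
	printf("after growth: %llu\n", stat);

	stat_update48(5, 1, &offset, &stat);	/* counter wrapped past 2^48 */
	printf("after wrap:   %llu\n", stat);
	return 0;
}
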
+/**
* i40e_find_filter - Search VSI filter list for specific mac/vlan filter
* @vsi: the VSI to be searched
* @macaddr: the MAC address
@@ -1101,6 +1162,30 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
}
/**
+ * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
+ * @vsi: the PF Main VSI - inappropriate for any other VSI
+ * @macaddr: the MAC address
+ **/
+static void i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
+{
+ struct i40e_aqc_remove_macvlan_element_data element;
+ struct i40e_pf *pf = vsi->back;
+ i40e_status aq_ret;
+
+ /* Only appropriate for the PF main VSI */
+ if (vsi->type != I40E_VSI_MAIN)
+ return;
+
+ ether_addr_copy(element.mac_addr, macaddr);
+ element.vlan_tag = 0;
+ element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
+ I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
+ aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
+ if (aq_ret)
+ dev_err(&pf->pdev->dev, "Could not remove default MAC-VLAN\n");
+}
+
+/**
* i40e_add_filter - Add a mac/vlan filter to the VSI
* @vsi: the VSI to be searched
* @macaddr: the MAC address
@@ -1125,7 +1210,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
if (!f)
goto add_filter_out;
- memcpy(f->macaddr, macaddr, ETH_ALEN);
+ ether_addr_copy(f->macaddr, macaddr);
f->vlan = vlan;
f->changed = true;
@@ -1249,7 +1334,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
return -EADDRNOTAVAIL;
}
- memcpy(vsi->back->hw.mac.addr, addr->sa_data, netdev->addr_len);
+ ether_addr_copy(vsi->back->hw.mac.addr, addr->sa_data);
}
/* In order to be sure to not drop any packets, add the new address
@@ -1263,7 +1348,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
i40e_sync_vsi_filters(vsi);
- memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, addr->sa_data);
return 0;
}
@@ -1313,7 +1398,7 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
vsi->tc_config.numtc = numtc;
vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
/* Number of queues per enabled TC */
- num_tc_qps = rounddown_pow_of_two(vsi->alloc_queue_pairs/numtc);
+ num_tc_qps = vsi->alloc_queue_pairs/numtc;
num_tc_qps = min_t(int, num_tc_qps, I40E_MAX_QUEUES_PER_TC);
/* Setup queue offset/count for all TCs for given VSI */
@@ -1520,8 +1605,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cmd_flags = 0;
/* add to delete list */
- memcpy(del_list[num_del].mac_addr,
- f->macaddr, ETH_ALEN);
+ ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
del_list[num_del].vlan_tag =
cpu_to_le16((u16)(f->vlan ==
I40E_VLAN_ANY ? 0 : f->vlan));
@@ -1542,7 +1626,9 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
num_del = 0;
memset(del_list, 0, sizeof(*del_list));
- if (aq_ret)
+ if (aq_ret &&
+ pf->hw.aq.asq_last_status !=
+ I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error, err %d, aq_err %d while flushing a full buffer\n",
aq_ret,
@@ -1554,7 +1640,8 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
del_list, num_del, NULL);
num_del = 0;
- if (aq_ret)
+ if (aq_ret &&
+ pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT)
dev_info(&pf->pdev->dev,
"ignoring delete macvlan error, err %d, aq_err %d\n",
aq_ret, pf->hw.aq.asq_last_status);
@@ -1583,8 +1670,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
cmd_flags = 0;
/* add to add array */
- memcpy(add_list[num_add].mac_addr,
- f->macaddr, ETH_ALEN);
+ ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
add_list[num_add].vlan_tag =
cpu_to_le16(
(u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
@@ -1681,7 +1767,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
pf->flags &= ~I40E_FLAG_FILTER_SYNC;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] &&
(pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
i40e_sync_vsi_filters(pf->vsi[v]);
@@ -1698,7 +1784,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
+ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
struct i40e_vsi *vsi = np->vsi;
/* MTU < 68 is an error and causes problems on some kernels */
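
Adding VLAN_HLEN to max_frame reserves room for an 802.1Q tag when validating an MTU change. A worked example with the standard header sizes:

#include <stdio.h>

#define ETH_HLEN	14	/* Ethernet header */
#define ETH_FCS_LEN	4	/* frame checksum */
#define VLAN_HLEN	4	/* 802.1Q tag */

int main(void)
{
	int new_mtu = 1500;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	printf("max_frame = %d\n", max_frame);	/* 1522 */
	return 0;
}
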
@@ -2312,6 +2398,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
rx_ctx.crcstrip = 1;
rx_ctx.l2tsel = 1;
rx_ctx.showiv = 1;
+ /* set the prefena field to 1 because the manual says to */
+ rx_ctx.prefena = 1;
/* clear the context in the HMC */
err = i40e_clear_lan_rx_queue_context(hw, pf_q);
@@ -2413,6 +2501,7 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
**/
static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
{
+ struct i40e_ring *tx_ring, *rx_ring;
u16 qoffset, qcount;
int i, n;
@@ -2426,8 +2515,8 @@ static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
qoffset = vsi->tc_config.tc_info[n].qoffset;
qcount = vsi->tc_config.tc_info[n].qcount;
for (i = qoffset; i < (qoffset + qcount); i++) {
- struct i40e_ring *rx_ring = vsi->rx_rings[i];
- struct i40e_ring *tx_ring = vsi->tx_rings[i];
+ rx_ring = vsi->rx_rings[i];
+ tx_ring = vsi->tx_rings[i];
rx_ring->dcb_tc = n;
tx_ring->dcb_tc = n;
}
@@ -2565,7 +2654,6 @@ static void i40e_enable_misc_int_causes(struct i40e_hw *hw)
I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
I40E_PFINT_ICR0_ENA_GPIO_MASK |
I40E_PFINT_ICR0_ENA_TIMESYNC_MASK |
- I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK |
I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
I40E_PFINT_ICR0_ENA_VFLR_MASK |
I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
@@ -2733,6 +2821,7 @@ static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
&q_vector->affinity_mask);
}
+ vsi->irqs_ready = true;
return 0;
free_queue_irqs:
@@ -3152,6 +3241,12 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
+
+ /* warn the Tx unit of upcoming changes */
+ i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
+ if (!enable)
+ udelay(10);
+
for (j = 0; j < 50; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
@@ -3160,9 +3255,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
usleep_range(1000, 2000);
}
/* Skip if the queue is already in the requested state */
- if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- continue;
- if (!enable && !(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
continue;
/* turn on/off the queue */
@@ -3178,13 +3271,8 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
/* wait for the change to finish */
for (j = 0; j < 10; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- if (enable) {
- if ((tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- } else {
- if (!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- }
+ if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
udelay(10);
}
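
The queue enable/disable hunks collapse parallel if/else ladders into a single comparison by normalizing the masked status bit to 0 or 1 with "!!", so one expression answers "has the queue reached the requested state?" for both directions. A small self-test of the idiom, using an illustrative mask:

#include <stdio.h>

#define QENA_STAT_MASK 0x4u	/* illustrative status bit */

static int state_reached(int enable, unsigned int reg)
{
	return enable == !!(reg & QENA_STAT_MASK);
}

int main(void)
{
	printf("%d %d %d %d\n",
	       state_reached(1, 0x4),	/* enabling, bit set   -> 1 */
	       state_reached(1, 0x0),	/* enabling, bit clear -> 0 */
	       state_reached(0, 0x0),	/* disabling, clear    -> 1 */
	       state_reached(0, 0x4));	/* disabling, set      -> 0 */
	return 0;
}
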
@@ -3223,15 +3311,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
usleep_range(1000, 2000);
}
- if (enable) {
- /* is STAT set ? */
- if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
- } else {
- /* is !STAT set ? */
- if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- continue;
- }
+ /* Skip if the queue is already in the requested state */
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ continue;
/* turn on/off the queue */
if (enable)
@@ -3244,13 +3326,8 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
for (j = 0; j < 10; j++) {
rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- if (enable) {
- if ((rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- } else {
- if (!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- }
+ if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
udelay(10);
}
@@ -3304,6 +3381,10 @@ static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
if (!vsi->q_vectors)
return;
+ if (!vsi->irqs_ready)
+ return;
+
+ vsi->irqs_ready = false;
for (i = 0; i < vsi->num_q_vectors; i++) {
u16 vector = i + base;
@@ -3476,7 +3557,7 @@ static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
int i;
i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i])
i40e_vsi_free_q_vectors(pf->vsi[i]);
i40e_reset_interrupt_capability(pf);
@@ -3513,6 +3594,19 @@ static void i40e_napi_disable_all(struct i40e_vsi *vsi)
}
/**
+ * i40e_vsi_close - Shut down a VSI
+ * @vsi: the vsi to be quelled
+ **/
+static void i40e_vsi_close(struct i40e_vsi *vsi)
+{
+ if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
+ i40e_down(vsi);
+ i40e_vsi_free_irq(vsi);
+ i40e_vsi_free_tx_resources(vsi);
+ i40e_vsi_free_rx_resources(vsi);
+}
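
The new i40e_vsi_close() guards i40e_down() with test_and_set_bit() so the down sequence runs at most once even when close is reached from several paths (ndo_stop, quiesce, the old i40e_close() body). A non-atomic userspace sketch of that idempotency pattern; the kernel primitive is additionally atomic:

#include <stdio.h>

/* non-atomic stand-in for the kernel's test_and_set_bit() */
static int test_and_set_bit(int bit, unsigned long *state)
{
	int was_set = !!(*state & (1UL << bit));

	*state |= 1UL << bit;
	return was_set;
}

#define STATE_DOWN 0

static void vsi_close(unsigned long *state)
{
	if (!test_and_set_bit(STATE_DOWN, state))
		printf("bringing VSI down\n");		/* first caller only */
	printf("freeing irq and ring resources\n");	/* safe to repeat */
}

int main(void)
{
	unsigned long state = 0;

	vsi_close(&state);	/* performs the down sequence */
	vsi_close(&state);	/* down already done; skips it */
	return 0;
}
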
+
+/**
* i40e_quiesce_vsi - Pause a given VSI
* @vsi: the VSI being paused
**/
@@ -3525,8 +3619,7 @@ static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
if (vsi->netdev && netif_running(vsi->netdev)) {
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
} else {
- set_bit(__I40E_DOWN, &vsi->state);
- i40e_down(vsi);
+ i40e_vsi_close(vsi);
}
}
@@ -3543,7 +3636,7 @@ static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
if (vsi->netdev && netif_running(vsi->netdev))
vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
else
- i40e_up(vsi); /* this clears the DOWN bit */
+ i40e_vsi_open(vsi); /* this clears the DOWN bit */
}
/**
@@ -3554,7 +3647,7 @@ static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
{
int v;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
i40e_quiesce_vsi(pf->vsi[v]);
}
@@ -3568,7 +3661,7 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
{
int v;
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
i40e_unquiesce_vsi(pf->vsi[v]);
}
@@ -4009,7 +4102,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
}
/* Update each VSI */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (!pf->vsi[v])
continue;
@@ -4028,6 +4121,8 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
pf->vsi[v]->seid);
/* Will try to configure as many components as possible */
} else {
+ /* Re-configure VSI vectors based on updated TC map */
+ i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
if (pf->vsi[v]->netdev)
i40e_dcbnl_set_all(pf->vsi[v]);
}
@@ -4065,14 +4160,69 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
/* When status is not DISABLED then DCBX in FW */
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
DCB_CAP_DCBX_VER_IEEE;
- pf->flags |= I40E_FLAG_DCB_ENABLED;
+
+ pf->flags |= I40E_FLAG_DCB_CAPABLE;
+ /* Enable DCB tagging only when more than one TC */
+ if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
}
+ } else {
+ dev_info(&pf->pdev->dev, "AQ Querying DCB configuration failed: %d\n",
+ pf->hw.aq.asq_last_status);
}
out:
return err;
}
#endif /* CONFIG_I40E_DCB */
+#define SPEED_SIZE 14
+#define FC_SIZE 8
+/**
+ * i40e_print_link_message - print link up or down
+ * @vsi: the VSI for which link needs a message
+ * @isup: true if the link is up, false if it is down
+ */
+static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
+{
+ char speed[SPEED_SIZE] = "Unknown";
+ char fc[FC_SIZE] = "RX/TX";
+
+ if (!isup) {
+ netdev_info(vsi->netdev, "NIC Link is Down\n");
+ return;
+ }
+
+ switch (vsi->back->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ strncpy(speed, "40 Gbps", SPEED_SIZE);
+ break;
+ case I40E_LINK_SPEED_10GB:
+ strncpy(speed, "10 Gbps", SPEED_SIZE);
+ break;
+ case I40E_LINK_SPEED_1GB:
+ strncpy(speed, "1000 Mbps", SPEED_SIZE);
+ break;
+ default:
+ break;
+ }
+
+ switch (vsi->back->hw.fc.current_mode) {
+ case I40E_FC_FULL:
+ strncpy(fc, "RX/TX", FC_SIZE);
+ break;
+ case I40E_FC_TX_PAUSE:
+ strncpy(fc, "TX", FC_SIZE);
+ break;
+ case I40E_FC_RX_PAUSE:
+ strncpy(fc, "RX", FC_SIZE);
+ break;
+ default:
+ strncpy(fc, "None", FC_SIZE);
+ break;
+ }
+
+ netdev_info(vsi->netdev, "NIC Link is Up %s Full Duplex, Flow Control: %s\n",
+ speed, fc);
+}
/**
* i40e_up_complete - Finish the last steps of bringing up a connection
@@ -4099,11 +4249,11 @@ static int i40e_up_complete(struct i40e_vsi *vsi)
if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
(vsi->netdev)) {
- netdev_info(vsi->netdev, "NIC Link is Up\n");
+ i40e_print_link_message(vsi, true);
netif_tx_start_all_queues(vsi->netdev);
netif_carrier_on(vsi->netdev);
} else if (vsi->netdev) {
- netdev_info(vsi->netdev, "NIC Link is Down\n");
+ i40e_print_link_message(vsi, false);
}
/* replay FDIR SB filters */
@@ -4309,24 +4459,32 @@ int i40e_vsi_open(struct i40e_vsi *vsi)
if (err)
goto err_setup_rx;
- if (!vsi->netdev) {
- err = EINVAL;
- goto err_setup_rx;
- }
- snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
- dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
- err = i40e_vsi_request_irq(vsi, int_name);
- if (err)
- goto err_setup_rx;
+ if (vsi->netdev) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
+ dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
+ err = i40e_vsi_request_irq(vsi, int_name);
+ if (err)
+ goto err_setup_rx;
- /* Notify the stack of the actual queue counts. */
- err = netif_set_real_num_tx_queues(vsi->netdev, vsi->num_queue_pairs);
- if (err)
- goto err_set_queues;
+ /* Notify the stack of the actual queue counts. */
+ err = netif_set_real_num_tx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
- err = netif_set_real_num_rx_queues(vsi->netdev, vsi->num_queue_pairs);
- if (err)
- goto err_set_queues;
+ err = netif_set_real_num_rx_queues(vsi->netdev,
+ vsi->num_queue_pairs);
+ if (err)
+ goto err_set_queues;
+
+ } else if (vsi->type == I40E_VSI_FDIR) {
+ snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
+ dev_driver_string(&pf->pdev->dev));
+ err = i40e_vsi_request_irq(vsi, int_name);
+ } else {
+ err = -EINVAL;
+ goto err_setup_rx;
+ }
err = i40e_up_complete(vsi);
if (err)
@@ -4383,14 +4541,7 @@ static int i40e_close(struct net_device *netdev)
struct i40e_netdev_priv *np = netdev_priv(netdev);
struct i40e_vsi *vsi = np->vsi;
- if (test_and_set_bit(__I40E_DOWN, &vsi->state))
- return 0;
-
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
-
- i40e_vsi_free_tx_resources(vsi);
- i40e_vsi_free_rx_resources(vsi);
+ i40e_vsi_close(vsi);
return 0;
}
@@ -4410,6 +4561,9 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
WARN_ON(in_interrupt());
+ if (i40e_check_asq_alive(&pf->hw))
+ i40e_vc_notify_reset(pf);
+
/* do the biggest reset indicated */
if (reset_flags & (1 << __I40E_GLOBAL_RESET_REQUESTED)) {
@@ -4475,7 +4629,7 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
/* Find the VSI(s) that requested a re-init */
dev_info(&pf->pdev->dev,
"VSI reinit requested\n");
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
if (vsi != NULL &&
test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
@@ -4565,6 +4719,10 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
int ret = 0;
u8 type;
+ /* Not DCB capable or capability disabled */
+ if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
+ return ret;
+
/* Ignore if event is not for Nearest Bridge */
type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
@@ -4606,6 +4764,12 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
if (!need_reconfig)
goto exit;
+ /* Enable DCB tagging only when more than one TC */
+ if (i40e_dcb_get_num_tc(dcbx_cfg) > 1)
+ pf->flags |= I40E_FLAG_DCB_ENABLED;
+ else
+ pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+
/* Reconfiguration needed quiesce all VSIs */
i40e_pf_quiesce_all_vsi(pf);
@@ -4709,8 +4873,7 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
fcnt_prog = i40e_get_current_fd_count(pf);
- fcnt_avail = pf->hw.fdir_shared_filter_count +
- pf->fdir_pf_filter_count;
+ fcnt_avail = i40e_get_fd_cnt_all(pf);
if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
@@ -4803,7 +4966,7 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
i40e_veb_link_event(pf->veb[i], link_up);
/* ... now the local VSIs */
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
i40e_vsi_link_event(pf->vsi[i], link_up);
}
@@ -4821,10 +4984,8 @@ static void i40e_link_event(struct i40e_pf *pf)
if (new_link == old_link)
return;
-
if (!test_bit(__I40E_DOWN, &pf->vsi[pf->lan_vsi]->state))
- netdev_info(pf->vsi[pf->lan_vsi]->netdev,
- "NIC Link is %s\n", (new_link ? "Up" : "Down"));
+ i40e_print_link_message(pf->vsi[pf->lan_vsi], new_link);
/* Notify the base of the switch tree connected to
* the link. Floating VEBs are not notified.
@@ -4862,7 +5023,7 @@ static void i40e_check_hang_subtask(struct i40e_pf *pf)
* for each q_vector
* force an interrupt
*/
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
struct i40e_vsi *vsi = pf->vsi[v];
int armed = 0;
@@ -4912,7 +5073,7 @@ static void i40e_watchdog_subtask(struct i40e_pf *pf)
/* Update the stats for active netdevs so the network stack
* can look at updated numbers whenever it cares to
*/
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->netdev)
i40e_update_stats(pf->vsi[i]);
@@ -5018,11 +5179,47 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
u16 pending, i = 0;
i40e_status ret;
u16 opcode;
+ u32 oldval;
u32 val;
if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
return;
+ /* check for error indications */
+ val = rd32(&pf->hw, pf->hw.aq.arq.len);
+ oldval = val;
+ if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
+ dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
+ }
+ if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
+ dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
+ }
+ if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
+ dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
+ val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(&pf->hw, pf->hw.aq.arq.len, val);
+
+ val = rd32(&pf->hw, pf->hw.aq.asq.len);
+ oldval = val;
+ if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
+ dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
+ }
+ if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
+ dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
+ }
+ if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
+ dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
+ val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(&pf->hw, pf->hw.aq.asq.len, val);
+
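The admin-queue error handling added above follows a read-modify-write-if-changed pattern: read the length register once, clear whichever error bits are set in a local copy, and issue the write-back only when something actually changed. A sketch with made-up masks and a plain variable standing in for the register:

#include <stdio.h>

#define ERR_VFE  0x1u	/* illustrative masks, not the register layout */
#define ERR_OVFL 0x2u
#define ERR_CRIT 0x4u

static unsigned int fake_len_reg = ERR_OVFL | 0x100u;	/* len + overflow */

int main(void)
{
	unsigned int val = fake_len_reg;	/* single read */
	unsigned int oldval = val;

	if (val & ERR_VFE) {
		printf("VF error detected\n");
		val &= ~ERR_VFE;
	}
	if (val & ERR_OVFL) {
		printf("overflow error detected\n");
		val &= ~ERR_OVFL;
	}
	if (val & ERR_CRIT) {
		printf("critical error detected\n");
		val &= ~ERR_CRIT;
	}

	if (oldval != val)
		fake_len_reg = val;	/* write back only on change */

	printf("register now 0x%x\n", fake_len_reg);	/* 0x100 */
	return 0;
}
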
event.msg_size = I40E_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
if (!event.msg_buf)
@@ -5128,7 +5325,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
int ret;
/* build VSI that owns this VEB, temporarily attached to base VEB */
- for (v = 0; v < pf->hw.func_caps.num_vsis && !ctl_vsi; v++) {
+ for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
if (pf->vsi[v] &&
pf->vsi[v]->veb_idx == veb->idx &&
pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
@@ -5158,7 +5355,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
goto end_reconstitute;
/* create the remaining VSIs attached to this VEB */
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
continue;
@@ -5226,9 +5423,6 @@ static int i40e_get_capabilities(struct i40e_pf *pf)
}
} while (err);
- /* increment MSI-X count because current FW skips one */
- pf->hw.func_caps.num_msix_vectors++;
-
if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) ||
(pf->hw.aq.fw_maj_ver < 2)) {
pf->hw.func_caps.num_msix_vectors++;
@@ -5267,15 +5461,14 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi);
static void i40e_fdir_sb_setup(struct i40e_pf *pf)
{
struct i40e_vsi *vsi;
- bool new_vsi = false;
- int err, i;
+ int i;
if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
return;
/* find existing VSI and see if it needs configuring */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
vsi = pf->vsi[i];
break;
@@ -5288,47 +5481,12 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
pf->vsi[pf->lan_vsi]->seid, 0);
if (!vsi) {
dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
- goto err_vsi;
+ pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+ return;
}
- new_vsi = true;
- }
- i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
-
- err = i40e_vsi_setup_tx_resources(vsi);
- if (err)
- goto err_setup_tx;
- err = i40e_vsi_setup_rx_resources(vsi);
- if (err)
- goto err_setup_rx;
-
- if (new_vsi) {
- char int_name[IFNAMSIZ + 9];
- err = i40e_vsi_configure(vsi);
- if (err)
- goto err_setup_rx;
- snprintf(int_name, sizeof(int_name) - 1, "%s-fdir",
- dev_driver_string(&pf->pdev->dev));
- err = i40e_vsi_request_irq(vsi, int_name);
- if (err)
- goto err_setup_rx;
- err = i40e_up_complete(vsi);
- if (err)
- goto err_up_complete;
- clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
}
- return;
-
-err_up_complete:
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
-err_setup_rx:
- i40e_vsi_free_rx_resources(vsi);
-err_setup_tx:
- i40e_vsi_free_tx_resources(vsi);
-err_vsi:
- pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
- i40e_vsi_clear(vsi);
+ i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
}
/**
@@ -5340,7 +5498,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
int i;
i40e_fdir_filter_exit(pf);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
i40e_vsi_release(pf->vsi[i]);
break;
@@ -5357,7 +5515,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
static int i40e_prep_for_reset(struct i40e_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
- i40e_status ret;
+ i40e_status ret = 0;
u32 v;
clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
@@ -5366,13 +5524,10 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
- if (i40e_check_asq_alive(hw))
- i40e_vc_notify_reset(pf);
-
/* quiesce the VSIs and their queues that are not already DOWN */
i40e_pf_quiesce_all_vsi(pf);
- for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
+ for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v])
pf->vsi[v]->seid = 0;
}
@@ -5380,22 +5535,40 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
i40e_shutdown_adminq(&pf->hw);
/* call shutdown HMC */
- ret = i40e_shutdown_lan_hmc(hw);
- if (ret) {
- dev_info(&pf->pdev->dev, "shutdown_lan_hmc failed: %d\n", ret);
- clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ if (hw->hmc.hmc_obj) {
+ ret = i40e_shutdown_lan_hmc(hw);
+ if (ret) {
+ dev_warn(&pf->pdev->dev,
+ "shutdown_lan_hmc failed: %d\n", ret);
+ clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
+ }
}
return ret;
}
/**
+ * i40e_send_version - update firmware with driver version
+ * @pf: PF struct
+ */
+static void i40e_send_version(struct i40e_pf *pf)
+{
+ struct i40e_driver_version dv;
+
+ dv.major_version = DRV_VERSION_MAJOR;
+ dv.minor_version = DRV_VERSION_MINOR;
+ dv.build_version = DRV_VERSION_BUILD;
+ dv.subbuild_version = 0;
+ strncpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
+ i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+}
+
+/**
* i40e_reset_and_rebuild - reset and rebuild using a saved config
* @pf: board private structure
* @reinit: if the Main VSI needs to re-initialized.
**/
static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
{
- struct i40e_driver_version dv;
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
u32 v;
@@ -5405,8 +5578,10 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
* because the reset will make them disappear.
*/
ret = i40e_pf_reset(hw);
- if (ret)
+ if (ret) {
dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
+ goto end_core_reset;
+ }
pf->pfr_count++;
if (test_bit(__I40E_DOWN, &pf->state))
@@ -5426,6 +5601,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
i40e_verify_eeprom(pf);
}
+ i40e_clear_pxe_mode(hw);
ret = i40e_get_capabilities(pf);
if (ret) {
dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n",
@@ -5526,13 +5702,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
}
/* tell the firmware that we're starting */
- dv.major_version = DRV_VERSION_MAJOR;
- dv.minor_version = DRV_VERSION_MINOR;
- dv.build_version = DRV_VERSION_BUILD;
- dv.subbuild_version = 0;
- i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
-
- dev_info(&pf->pdev->dev, "reset complete\n");
+ i40e_send_version(pf);
end_core_reset:
clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
@@ -5642,7 +5812,6 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
**/
static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
{
- const int vxlan_hdr_qwords = 4;
struct i40e_hw *hw = &pf->hw;
i40e_status ret;
u8 filter_index;
@@ -5660,7 +5829,6 @@ static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
port = pf->vxlan_ports[i];
ret = port ?
i40e_aq_add_udp_tunnel(hw, ntohs(port),
- vxlan_hdr_qwords,
I40E_AQC_TUNNEL_TYPE_VXLAN,
&filter_index, NULL)
: i40e_aq_del_udp_tunnel(hw, i, NULL);
@@ -5839,15 +6007,15 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
* find next empty vsi slot, looping back around if necessary
*/
i = pf->next_vsi;
- while (i < pf->hw.func_caps.num_vsis && pf->vsi[i])
+ while (i < pf->num_alloc_vsi && pf->vsi[i])
i++;
- if (i >= pf->hw.func_caps.num_vsis) {
+ if (i >= pf->num_alloc_vsi) {
i = 0;
while (i < pf->next_vsi && pf->vsi[i])
i++;
}
- if (i < pf->hw.func_caps.num_vsis && !pf->vsi[i]) {
+ if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
vsi_idx = i; /* Found one! */
} else {
ret = -ENODEV;
@@ -5870,6 +6038,7 @@ static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
vsi->netdev_registered = false;
vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
INIT_LIST_HEAD(&vsi->mac_filter_list);
+ vsi->irqs_ready = false;
ret = i40e_set_num_rings_in_vsi(vsi);
if (ret)
@@ -5987,14 +6156,12 @@ static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
**/
static int i40e_alloc_rings(struct i40e_vsi *vsi)
{
+ struct i40e_ring *tx_ring, *rx_ring;
struct i40e_pf *pf = vsi->back;
int i;
/* Set basic values in the rings to be used later during open() */
for (i = 0; i < vsi->alloc_queue_pairs; i++) {
- struct i40e_ring *tx_ring;
- struct i40e_ring *rx_ring;
-
/* allocate space for both Tx and Rx in one shot */
tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
if (!tx_ring)
@@ -6052,8 +6219,6 @@ static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
vectors = 0;
}
- pf->num_msix_entries = vectors;
-
return vectors;
}
@@ -6107,6 +6272,16 @@ static int i40e_init_msix(struct i40e_pf *pf)
for (i = 0; i < v_budget; i++)
pf->msix_entries[i].entry = i;
vec = i40e_reserve_msix_vectors(pf, v_budget);
+
+ if (vec != v_budget) {
+ /* If we have limited resources, we will start with no vectors
+ * for the special features and then allocate vectors to some
+ * of these features based on the policy and at the end disable
+ * the features that did not get any vectors.
+ */
+ pf->num_vmdq_msix = 0;
+ }
+
if (vec < I40E_MIN_MSIX) {
pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
kfree(pf->msix_entries);
@@ -6115,27 +6290,25 @@ static int i40e_init_msix(struct i40e_pf *pf)
} else if (vec == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
- dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
- pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_vsis = 0;
pf->num_vmdq_qps = 0;
- pf->num_vmdq_msix = 0;
pf->num_lan_qps = 1;
pf->num_lan_msix = 1;
} else if (vec != v_budget) {
+ /* reserve the misc vector */
+ vec--;
+
/* Scale vector usage down */
pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */
- vec--; /* reserve the misc vector */
+ pf->num_vmdq_vsis = 1;
/* partition out the remaining vectors */
switch (vec) {
case 2:
- pf->num_vmdq_vsis = 1;
pf->num_lan_msix = 1;
break;
case 3:
- pf->num_vmdq_vsis = 1;
pf->num_lan_msix = 2;
break;
default:
@@ -6147,6 +6320,11 @@ static int i40e_init_msix(struct i40e_pf *pf)
}
}
+ if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
+ (pf->num_vmdq_msix == 0)) {
+ dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
+ pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
+ }
return err;
}
@@ -6171,7 +6349,7 @@ static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
if (vsi->netdev)
netif_napi_add(vsi->netdev, &q_vector->napi,
- i40e_napi_poll, vsi->work_limit);
+ i40e_napi_poll, NAPI_POLL_WEIGHT);
q_vector->rx.latency_range = I40E_LOW_LATENCY;
q_vector->tx.latency_range = I40E_LOW_LATENCY;
@@ -6231,7 +6409,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (err) {
pf->flags &= ~(I40E_FLAG_MSIX_ENABLED |
I40E_FLAG_RSS_ENABLED |
- I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
@@ -6364,7 +6542,6 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
return 0;
queue_count = min_t(int, queue_count, pf->rss_size_max);
- queue_count = rounddown_pow_of_two(queue_count);
if (queue_count != pf->rss_size) {
i40e_prep_for_reset(pf);
@@ -6407,6 +6584,10 @@ static int i40e_sw_init(struct i40e_pf *pf)
I40E_FLAG_MSIX_ENABLED |
I40E_FLAG_RX_1BUF_ENABLED;
+ /* Set default ITR */
+ pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
+ pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;
+
/* Depending on PF configurations, it is possible that the RSS
* maximum might end up larger than the available queues
*/
@@ -6416,7 +6597,6 @@ static int i40e_sw_init(struct i40e_pf *pf)
if (pf->hw.func_caps.rss) {
pf->flags |= I40E_FLAG_RSS_ENABLED;
pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
- pf->rss_size = rounddown_pow_of_two(pf->rss_size);
} else {
pf->rss_size = 1;
}
@@ -6432,8 +6612,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
(pf->hw.func_caps.fd_filters_best_effort > 0)) {
pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
+ /* Setup a counter for fd_atr per pf */
+ pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id);
if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) {
pf->flags |= I40E_FLAG_FD_SB_ENABLED;
+ /* Setup a counter for fd_sb per pf */
+ pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id);
} else {
dev_info(&pf->pdev->dev,
"Flow Director Sideband mode Disabled in MFP mode\n");
@@ -6649,6 +6833,96 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
}
#endif
+#ifdef HAVE_FDB_OPS
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
+ struct net_device *dev,
+ const unsigned char *addr,
+ u16 flags)
+#else
+static int i40e_ndo_fdb_add(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr,
+ u16 flags)
+#endif
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+ int err = 0;
+
+ if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
+ return -EOPNOTSUPP;
+
+ /* Hardware does not support aging addresses, so if an
+ * ndm_state is given, only allow permanent addresses
+ */
+ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
+ netdev_info(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
+ err = dev_uc_add_excl(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_add_excl(dev, addr);
+ else
+ err = -EINVAL;
+
+ /* Only return duplicate errors if NLM_F_EXCL is set */
+ if (err == -EEXIST && !(flags & NLM_F_EXCL))
+ err = 0;
+
+ return err;
+}
+
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+#ifdef USE_CONST_DEV_UC_CHAR
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ const unsigned char *addr)
+#else
+static int i40e_ndo_fdb_del(struct ndmsg *ndm,
+ struct net_device *dev,
+ unsigned char *addr)
+#endif
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+ int err = -EOPNOTSUPP;
+
+ if (ndm->ndm_state & NUD_PERMANENT) {
+ netdev_info(dev, "FDB only supports static addresses\n");
+ return -EINVAL;
+ }
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ if (is_unicast_ether_addr(addr))
+ err = dev_uc_del(dev, addr);
+ else if (is_multicast_ether_addr(addr))
+ err = dev_mc_del(dev, addr);
+ else
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int i40e_ndo_fdb_dump(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ struct net_device *dev,
+ int idx)
+{
+ struct i40e_netdev_priv *np = netdev_priv(dev);
+ struct i40e_pf *pf = np->vsi->back;
+
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
+ idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+
+ return idx;
+}
+
+#endif /* USE_DEFAULT_FDB_DEL_DUMP */
+#endif /* HAVE_FDB_OPS */
static const struct net_device_ops i40e_netdev_ops = {
.ndo_open = i40e_open,
.ndo_stop = i40e_close,
@@ -6669,13 +6943,21 @@ static const struct net_device_ops i40e_netdev_ops = {
.ndo_set_features = i40e_set_features,
.ndo_set_vf_mac = i40e_ndo_set_vf_mac,
.ndo_set_vf_vlan = i40e_ndo_set_vf_port_vlan,
- .ndo_set_vf_tx_rate = i40e_ndo_set_vf_bw,
+ .ndo_set_vf_rate = i40e_ndo_set_vf_bw,
.ndo_get_vf_config = i40e_ndo_get_vf_config,
.ndo_set_vf_link_state = i40e_ndo_set_vf_link_state,
+ .ndo_set_vf_spoofchk = i40e_ndo_set_vf_spoofchk,
#ifdef CONFIG_I40E_VXLAN
.ndo_add_vxlan_port = i40e_add_vxlan_port,
.ndo_del_vxlan_port = i40e_del_vxlan_port,
#endif
+#ifdef HAVE_FDB_OPS
+ .ndo_fdb_add = i40e_ndo_fdb_add,
+#ifndef USE_DEFAULT_FDB_DEL_DUMP
+ .ndo_fdb_del = i40e_ndo_fdb_del,
+ .ndo_fdb_dump = i40e_ndo_fdb_dump,
+#endif
+#endif
};
/**
@@ -6720,16 +7002,26 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
NETIF_F_TSO_ECN |
NETIF_F_TSO6 |
NETIF_F_RXCSUM |
- NETIF_F_NTUPLE |
NETIF_F_RXHASH |
0;
+ if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
+ netdev->features |= NETIF_F_NTUPLE;
+
/* copy netdev features into list of user selectable features */
netdev->hw_features |= netdev->features;
if (vsi->type == I40E_VSI_MAIN) {
SET_NETDEV_DEV(netdev, &pf->pdev->dev);
- memcpy(mac_addr, hw->mac.perm_addr, ETH_ALEN);
+ ether_addr_copy(mac_addr, hw->mac.perm_addr);
+ /* The following two steps are necessary to prevent reception
+ * of tagged packets - by default the NVM loads a MAC-VLAN
+ * filter that accepts any tagged packet - during normal
+ * operation, until a specific VLAN tag filter has been set.
+ */
+ i40e_rm_default_mac_filter(vsi, mac_addr);
+ i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, true);
} else {
/* relate the VSI_VMDQ name to the VSI_MAIN name */
snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
@@ -6739,8 +7031,8 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
}
i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
- memcpy(netdev->dev_addr, mac_addr, ETH_ALEN);
- memcpy(netdev->perm_addr, mac_addr, ETH_ALEN);
+ ether_addr_copy(netdev->dev_addr, mac_addr);
+ ether_addr_copy(netdev->perm_addr, mac_addr);
/* vlan gets same features (except vlan offload)
* after any tweaks for specific VSI types
*/
@@ -6772,7 +7064,6 @@ static void i40e_vsi_delete(struct i40e_vsi *vsi)
return;
i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
- return;
}
/**
@@ -6898,6 +7189,13 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
+ if (pf->vf[vsi->vf_id].spoofchk) {
+ ctxt.info.valid_sections |=
+ cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ ctxt.info.sec_flags |=
+ (I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
+ I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
+ }
/* Setup the VSI tx/rx queue map for TC0 only for now */
i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
break;
@@ -6982,11 +7280,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
unregister_netdev(vsi->netdev);
}
} else {
- if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
- i40e_down(vsi);
- i40e_vsi_free_irq(vsi);
- i40e_vsi_free_tx_resources(vsi);
- i40e_vsi_free_rx_resources(vsi);
+ i40e_vsi_close(vsi);
}
i40e_vsi_disable_irq(vsi);
}
@@ -7013,7 +7307,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
* the orphan VEBs yet. We'll wait for an explicit remove request
* from up the network stack.
*/
- for (n = 0, i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] &&
pf->vsi[i]->uplink_seid == uplink_seid &&
(pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
@@ -7192,7 +7486,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
if (!veb && uplink_seid != pf->mac_seid) {
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
vsi = pf->vsi[i];
break;
@@ -7435,7 +7729,7 @@ static void i40e_switch_branch_release(struct i40e_veb *branch)
* NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
* the VEB itself, so don't use (*branch) after this loop.
*/
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (!pf->vsi[i])
continue;
if (pf->vsi[i]->uplink_seid == branch_seid &&
@@ -7487,7 +7781,7 @@ void i40e_veb_release(struct i40e_veb *veb)
pf = veb->pf;
/* find the remaining VSI and check for extras */
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
n++;
vsi = pf->vsi[i];
@@ -7516,8 +7810,6 @@ void i40e_veb_release(struct i40e_veb *veb)
i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
i40e_veb_clear(veb);
-
- return;
}
/**
@@ -7601,10 +7893,10 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
}
/* make sure there is such a vsi and uplink */
- for (vsi_idx = 0; vsi_idx < pf->hw.func_caps.num_vsis; vsi_idx++)
+ for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
break;
- if (vsi_idx >= pf->hw.func_caps.num_vsis && vsi_seid != 0) {
+ if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
vsi_seid);
return NULL;
@@ -7639,6 +7931,8 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
if (ret)
goto err_veb;
+ if (vsi_idx == pf->lan_vsi)
+ pf->lan_veb = veb->idx;
return veb;
@@ -7774,15 +8068,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
"header: %d reported %d total\n",
num_reported, num_total);
- if (num_reported) {
- int sz = sizeof(*sw_config) * num_reported;
-
- kfree(pf->sw_config);
- pf->sw_config = kzalloc(sz, GFP_KERNEL);
- if (pf->sw_config)
- memcpy(pf->sw_config, sw_config, sz);
- }
-
for (i = 0; i < num_reported; i++) {
struct i40e_aqc_switch_config_element_resp *ele =
&sw_config->element[i];
@@ -7949,9 +8234,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
queues_left = pf->hw.func_caps.num_tx_qp;
if ((queues_left == 1) ||
- !(pf->flags & I40E_FLAG_MSIX_ENABLED) ||
- !(pf->flags & (I40E_FLAG_RSS_ENABLED | I40E_FLAG_FD_SB_ENABLED |
- I40E_FLAG_DCB_ENABLED))) {
+ !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
/* one qp for PF, no queues for anything else */
queues_left = 0;
pf->rss_size = pf->num_lan_qps = 1;
@@ -7960,14 +8243,27 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
I40E_FLAG_FD_SB_ENABLED |
I40E_FLAG_FD_ATR_ENABLED |
- I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_DCB_CAPABLE |
I40E_FLAG_SRIOV_ENABLED |
I40E_FLAG_VMDQ_ENABLED);
+ } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_CAPABLE))) {
+ /* one qp for PF */
+ pf->rss_size = pf->num_lan_qps = 1;
+ queues_left -= pf->num_lan_qps;
+
+ pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
+ I40E_FLAG_FD_SB_ENABLED |
+ I40E_FLAG_FD_ATR_ENABLED |
+ I40E_FLAG_DCB_ENABLED |
+ I40E_FLAG_VMDQ_ENABLED);
} else {
/* Not enough queues for all TCs */
- if ((pf->flags & I40E_FLAG_DCB_ENABLED) &&
+ if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
(queues_left < I40E_MAX_TRAFFIC_CLASS)) {
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
}
pf->num_lan_qps = pf->rss_size_max;
@@ -7998,7 +8294,6 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
}
pf->queues_left = queues_left;
- return;
}
/**
@@ -8055,12 +8350,13 @@ static void i40e_print_features(struct i40e_pf *pf)
if (pf->flags & I40E_FLAG_RSS_ENABLED)
buf += sprintf(buf, "RSS ");
- buf += sprintf(buf, "FDir ");
if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
- buf += sprintf(buf, "ATR ");
- if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
+ buf += sprintf(buf, "FD_ATR ");
+ if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
+ buf += sprintf(buf, "FD_SB ");
buf += sprintf(buf, "NTUPLE ");
- if (pf->flags & I40E_FLAG_DCB_ENABLED)
+ }
+ if (pf->flags & I40E_FLAG_DCB_CAPABLE)
buf += sprintf(buf, "DCB ");
if (pf->flags & I40E_FLAG_PTP)
buf += sprintf(buf, "PTP ");
@@ -8083,13 +8379,13 @@ static void i40e_print_features(struct i40e_pf *pf)
**/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- struct i40e_driver_version dv;
struct i40e_pf *pf;
struct i40e_hw *hw;
static u16 pfs_found;
u16 link_status;
int err = 0;
u32 len;
+ u32 i;
err = pci_enable_device_mem(pdev);
if (err)
@@ -8201,6 +8497,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_verify_eeprom(pf);
+ /* Rev 0 hardware was never productized */
+ if (hw->revision_id < 1)
+ dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");
+
i40e_clear_pxe_mode(hw);
err = i40e_get_capabilities(pf);
if (err)
@@ -8234,7 +8534,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_mac_addr;
}
dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
- memcpy(hw->mac.perm_addr, hw->mac.addr, ETH_ALEN);
+ ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
pci_set_drvdata(pdev, pf);
pci_save_state(pdev);
@@ -8242,8 +8542,8 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err = i40e_init_pf_dcb(pf);
if (err) {
dev_info(&pdev->dev, "init_pf_dcb failed: %d\n", err);
- pf->flags &= ~I40E_FLAG_DCB_ENABLED;
- goto err_init_dcb;
+ pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
+ /* Continue without DCB enabled */
}
#endif /* CONFIG_I40E_DCB */
@@ -8264,10 +8564,18 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_determine_queue_usage(pf);
i40e_init_interrupt_scheme(pf);
- /* Set up the *vsi struct based on the number of VSIs in the HW,
- * and set up our local tracking of the MAIN PF vsi.
+ /* The number of VSIs reported by the FW is the minimum guaranteed
+ * to us; HW supports far more and we share the remaining pool with
+ * the other PFs. We allocate space for more than the guarantee with
+ * the understanding that we might not get them all later.
*/
- len = sizeof(struct i40e_vsi *) * pf->hw.func_caps.num_vsis;
+ if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
+ pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
+ else
+ pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;
+
+ /* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
+ len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
pf->vsi = kzalloc(len, GFP_KERNEL);
if (!pf->vsi) {
err = -ENOMEM;
@@ -8279,6 +8587,13 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
goto err_vsis;
}
+ /* if FDIR VSI was set up, start it now */
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
+ if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
+ i40e_vsi_open(pf->vsi[i]);
+ break;
+ }
+ }
/* The main driver is (mostly) up and happy. We need to set this state
* before setting up the misc vector or we get a race and the vector
@@ -8300,6 +8615,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+#ifdef CONFIG_PCI_IOV
/* prep for VF support */
if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
@@ -8322,17 +8638,14 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err);
}
}
+#endif /* CONFIG_PCI_IOV */
pfs_found++;
i40e_dbg_pf_init(pf);
/* tell the firmware that we're starting */
- dv.major_version = DRV_VERSION_MAJOR;
- dv.minor_version = DRV_VERSION_MINOR;
- dv.build_version = DRV_VERSION_BUILD;
- dv.subbuild_version = 0;
- i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
+ i40e_send_version(pf);
/* since everything's happy, start the service_task timer */
mod_timer(&pf->service_timer,
@@ -8373,9 +8686,6 @@ err_vsis:
err_switch_setup:
i40e_reset_interrupt_capability(pf);
del_timer_sync(&pf->service_timer);
-#ifdef CONFIG_I40E_DCB
-err_init_dcb:
-#endif /* CONFIG_I40E_DCB */
err_mac_addr:
err_configure_lan_hmc:
(void)i40e_shutdown_lan_hmc(hw);
@@ -8456,10 +8766,13 @@ static void i40e_remove(struct pci_dev *pdev)
}
/* shutdown and destroy the HMC */
- ret_code = i40e_shutdown_lan_hmc(&pf->hw);
- if (ret_code)
- dev_warn(&pdev->dev,
- "Failed to destroy the HMC resources: %d\n", ret_code);
+ if (pf->hw.hmc.hmc_obj) {
+ ret_code = i40e_shutdown_lan_hmc(&pf->hw);
+ if (ret_code)
+ dev_warn(&pdev->dev,
+ "Failed to destroy the HMC resources: %d\n",
+ ret_code);
+ }
/* shutdown the adminq */
ret_code = i40e_shutdown_adminq(&pf->hw);
@@ -8470,7 +8783,7 @@ static void i40e_remove(struct pci_dev *pdev)
/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
i40e_clear_interrupt_scheme(pf);
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++) {
+ for (i = 0; i < pf->num_alloc_vsi; i++) {
if (pf->vsi[i]) {
i40e_vsi_clear_rings(pf->vsi[i]);
i40e_vsi_clear(pf->vsi[i]);
@@ -8485,7 +8798,6 @@ static void i40e_remove(struct pci_dev *pdev)
kfree(pf->qp_pile);
kfree(pf->irq_pile);
- kfree(pf->sw_config);
kfree(pf->vsi);
/* force a PF reset to clean anything leftover */
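
As an aside, a minimal standalone sketch of the VSI-table sizing introduced in the probe path above: allocate room for at least I40E_MIN_VSI_ALLOC entries, or the firmware-guaranteed count if that is larger. The harness and the I40E_MIN_VSI_ALLOC value used here are illustrative assumptions; the driver's own definition in i40e.h is authoritative.

    #include <stdio.h>

    #define I40E_MIN_VSI_ALLOC 51  /* illustrative value; see i40e.h */

    /* Mirrors the probe logic: reserve at least the minimum, or the
     * firmware-guaranteed VSI count if that is larger.
     */
    static unsigned int num_alloc_vsi(unsigned int fw_num_vsis)
    {
            return fw_num_vsis < I40E_MIN_VSI_ALLOC ?
                   I40E_MIN_VSI_ALLOC : fw_num_vsis;
    }

    int main(void)
    {
            printf("fw reports 4 VSIs  -> allocate %u\n", num_alloc_vsi(4));
            printf("fw reports 64 VSIs -> allocate %u\n", num_alloc_vsi(64));
            return 0;
    }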
diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
index 9cd57e6..a430699 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h
@@ -70,10 +70,12 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
u16 *fw_major_version, u16 *fw_minor_version,
u16 *api_major_version, u16 *api_minor_version,
struct i40e_asq_cmd_details *cmd_details);
-i40e_status i40e_aq_set_phy_reset(struct i40e_hw *hw,
+i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
@@ -157,8 +159,8 @@ i40e_status i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
i40e_status i40e_aq_start_lldp(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_add_udp_tunnel(struct i40e_hw *hw,
- u16 udp_port, u8 header_len,
- u8 protocol_index, u8 *filter_index,
+ u16 udp_port, u8 protocol_index,
+ u8 *filter_index,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_del_udp_tunnel(struct i40e_hw *hw, u8 index,
struct i40e_asq_cmd_details *cmd_details);
@@ -167,6 +169,9 @@ i40e_status i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
i40e_status i40e_aq_mac_address_write(struct i40e_hw *hw,
u16 flags, u8 *mac_addr,
struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
+ u16 seid, u16 credit, u8 max_credit,
+ struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
i40e_status i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
@@ -216,6 +221,7 @@ bool i40e_get_link_status(struct i40e_hw *hw);
i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
u8 *mac_addr);
i40e_status i40e_validate_mac_addr(u8 *mac_addr);
+void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
/* prototype for functions used for NVM access */
i40e_status i40e_init_nvm(struct i40e_hw *hw);
i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index e61e637..101f439 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -48,7 +48,6 @@
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
#define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (0x2 << \
I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
-#define I40E_PTP_TX_TIMEOUT (HZ * 15)
/**
* i40e_ptp_read - Read the PHC time from the device
@@ -217,40 +216,6 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
}
/**
- * i40e_ptp_tx_work
- * @work: pointer to work struct
- *
- * This work function polls the PRTTSYN_STAT_0.TXTIME bit to determine when a
- * Tx timestamp event has occurred, in order to pass the Tx timestamp value up
- * the stack in the skb.
- */
-static void i40e_ptp_tx_work(struct work_struct *work)
-{
- struct i40e_pf *pf = container_of(work, struct i40e_pf,
- ptp_tx_work);
- struct i40e_hw *hw = &pf->hw;
- u32 prttsyn_stat_0;
-
- if (!pf->ptp_tx_skb)
- return;
-
- if (time_is_before_jiffies(pf->ptp_tx_start +
- I40E_PTP_TX_TIMEOUT)) {
- dev_kfree_skb_any(pf->ptp_tx_skb);
- pf->ptp_tx_skb = NULL;
- pf->tx_hwtstamp_timeouts++;
- dev_warn(&pf->pdev->dev, "clearing Tx timestamp hang\n");
- return;
- }
-
- prttsyn_stat_0 = rd32(hw, I40E_PRTTSYN_STAT_0);
- if (prttsyn_stat_0 & I40E_PRTTSYN_STAT_0_TXTIME_MASK)
- i40e_ptp_tx_hwtstamp(pf);
- else
- schedule_work(&pf->ptp_tx_work);
-}
-
-/**
* i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
* @ptp: The PTP clock structure
* @rq: The requested feature to change
@@ -608,7 +573,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
u32 regval;
spin_lock_init(&pf->tmreg_lock);
- INIT_WORK(&pf->ptp_tx_work, i40e_ptp_tx_work);
dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
netdev->name);
@@ -647,7 +611,6 @@ void i40e_ptp_stop(struct i40e_pf *pf)
pf->ptp_tx = false;
pf->ptp_rx = false;
- cancel_work_sync(&pf->ptp_tx_work);
if (pf->ptp_tx_skb) {
dev_kfree_skb_any(pf->ptp_tx_skb);
pf->ptp_tx_skb = NULL;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h
index 1d40f42..947de98 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_register.h
@@ -1340,8 +1340,6 @@
#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
@@ -1367,8 +1365,6 @@
#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
@@ -1589,6 +1585,14 @@
#define I40E_GLLAN_TSOMSK_M 0x000442DC
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+
#define I40E_PFLAN_QALLOC 0x001C0400
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 9478ddc..e49f31d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -24,6 +24,7 @@
*
******************************************************************************/
+#include <linux/prefetch.h>
#include "i40e.h"
#include "i40e_prototype.h"
@@ -61,7 +62,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
/* find existing FDIR VSI */
vsi = NULL;
- for (i = 0; i < pf->hw.func_caps.num_vsis; i++)
+ for (i = 0; i < pf->num_alloc_vsi; i++)
if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR)
vsi = pf->vsi[i];
if (!vsi)
@@ -120,7 +121,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
dcc |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
dcc |= ((u32)fdir_data->cnt_index <<
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
- I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
}
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
@@ -183,7 +184,6 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
struct iphdr *ip;
bool err = false;
int ret;
- int i;
static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
@@ -199,21 +199,17 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
ip->saddr = fd_data->src_ip[0];
udp->source = fd_data->src_port;
- for (i = I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP;
- i <= I40E_FILTER_PCTYPE_NONF_IPV4_UDP; i++) {
- fd_data->pctype = i;
- ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
-
- if (ret) {
- dev_info(&pf->pdev->dev,
- "Filter command send failed for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- err = true;
- } else {
- dev_info(&pf->pdev->dev,
- "Filter OK for PCTYPE %d (ret = %d)\n",
- fd_data->pctype, ret);
- }
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP;
+ ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
+ if (ret) {
+ dev_info(&pf->pdev->dev,
+ "Filter command send failed for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
+ err = true;
+ } else {
+ dev_info(&pf->pdev->dev,
+ "Filter OK for PCTYPE %d (ret = %d)\n",
+ fd_data->pctype, ret);
}
return err ? -EOPNOTSUPP : 0;
@@ -262,7 +258,7 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
}
}
- fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN;
+ fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
if (ret) {
@@ -455,22 +451,20 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
/* filter programming failed most likely due to table full */
fcnt_prog = i40e_get_current_fd_count(pf);
- fcnt_avail = pf->hw.fdir_shared_filter_count +
- pf->fdir_pf_filter_count;
-
+ fcnt_avail = i40e_get_fd_cnt_all(pf);
/* If ATR is running fcnt_prog can quickly change,
* if we are very close to full, it makes sense to disable
* FD ATR/SB and then re-enable it when there is room.
*/
if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
/* Turn off ATR first */
- if (pf->flags | I40E_FLAG_FD_ATR_ENABLED) {
+ if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
pf->auto_disable_flags |=
I40E_FLAG_FD_ATR_ENABLED;
pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
- } else if (pf->flags | I40E_FLAG_FD_SB_ENABLED) {
+ } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
pf->auto_disable_flags |=
@@ -1199,10 +1193,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
u32 rx_error,
u16 rx_ptype)
{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
+ bool ipv4 = false, ipv6 = false;
bool ipv4_tunnel, ipv6_tunnel;
__wsum rx_udp_csum;
- __sum16 csum;
struct iphdr *iph;
+ __sum16 csum;
ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -1213,29 +1209,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
- rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM))
+ return;
+
+ /* did the hardware decode the packet and checksum? */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* both known and outer_ip must be set for the below code to work */
+ if (!(decoded.known && decoded.outer_ip))
return;
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
+ ipv4 = true;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+ ipv6 = true;
+
+ if (ipv4 &&
+ (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ goto checksum_fail;
+
/* likely incorrect csum if alternate IP extension headers found */
- if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ if (ipv6 &&
+ decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
+ rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ /* don't increment checksum err here, non-fatal err */
return;
- /* IP or L4 or outmost IP checksum error */
- if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
- vsi->back->hw_csum_rx_error++;
+ /* there was some L4 error, count error and punt packet to the stack */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ goto checksum_fail;
+
+ /* handle packets that were not able to be checksummed due
+ * to arrival speed, in this case the stack can compute
+ * the csum.
+ */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- }
+ /* If VXLAN traffic has an outer UDPv4 checksum we need to check
+ * it in the driver, hardware does not do it for us.
+ * Since L3L4P bit was set we assume a valid IHL value (>=5)
+ * so the total length of IPv4 header is IHL*4 bytes
+ * The UDP_0 bit *may* be set if the *inner* header is UDP
+ */
if (ipv4_tunnel &&
+ (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
!(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
- /* If VXLAN traffic has an outer UDPv4 checksum we need to check
- * it in the driver, hardware does not do it for us.
- * Since L3L4P bit was set we assume a valid IHL value (>=5)
- * so the total length of IPv4 header is IHL*4 bytes
- */
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -1252,13 +1276,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
(skb->len - skb_transport_offset(skb)),
IPPROTO_UDP, rx_udp_csum);
- if (udp_hdr(skb)->check != csum) {
- vsi->back->hw_csum_rx_error++;
- return;
- }
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return;
+
+checksum_fail:
+ vsi->back->hw_csum_rx_error++;
}
/**
@@ -1435,6 +1462,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
+ /* TODO: shouldn't we increment a counter indicating the
+ * drop?
+ */
goto next_desc;
}
@@ -1665,6 +1695,11 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
dtype_cmd |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
+ dtype_cmd |= I40E_TXD_FLTR_QW1_CNT_ENA_MASK;
+ dtype_cmd |=
+ ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) &
+ I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
+
fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
}
@@ -1825,9 +1860,6 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
*cd_type_cmd_tso_mss |= (u64)I40E_TX_CTX_DESC_TSYN <<
I40E_TXD_CTX_QW1_CMD_SHIFT;
- pf->ptp_tx_start = jiffies;
- schedule_work(&pf->ptp_tx_work);
-
return 1;
}
@@ -2179,9 +2211,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
{
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
unsigned int f;
-#endif
int count = 0;
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -2190,12 +2220,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
- count += skb_shinfo(skb)->nr_frags;
-#endif
+
count += TXD_USE_COUNT(skb_headlen(skb));
if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index d534969..0277894 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -27,7 +27,7 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
-/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
@@ -69,16 +69,11 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
@@ -122,11 +117,11 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+#define I40E_MAX_DATA_PER_TXD 8192
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_TX_FLAGS_CSUM (u32)(1)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
@@ -184,7 +179,6 @@ enum i40e_ring_state_t {
__I40E_TX_DETECT_HANG,
__I40E_HANG_CHECK_ARMED,
__I40E_RX_PS_ENABLED,
- __I40E_RX_LRO_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
};
@@ -200,12 +194,6 @@ enum i40e_ring_state_t {
set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define ring_is_lro_enabled(ring) \
- test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define set_ring_lro_enabled(ring) \
- set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define clear_ring_lro_enabled(ring) \
- clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
#define ring_is_16byte_desc_enabled(ring) \
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define set_ring_16byte_desc_enabled(ring) \
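
Since I40E_MAX_DATA_PER_TXD is now a power of two, TXD_USE_COUNT is a plain round-up division and DESC_NEEDED no longer scales with PAGE_SIZE. A quick standalone check of that arithmetic (the two macros mirror the patch; DIV_ROUND_UP and main() are an invented harness):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
    #define I40E_MAX_DATA_PER_TXD   8192
    #define TXD_USE_COUNT(S)        DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)

    int main(void)
    {
            /* exactly one descriptor up to 8192 bytes, two beyond it */
            printf("8192 bytes -> %d descriptor(s)\n", TXD_USE_COUNT(8192));
            printf("9000 bytes -> %d descriptor(s)\n", TXD_USE_COUNT(9000));
            return 0;
    }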
diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h
index 71a968f..9d39ff2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_type.h
@@ -36,12 +36,10 @@
/* Device IDs */
#define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_SFP_X710 0x1573
#define I40E_DEV_ID_QEMU 0x1574
#define I40E_DEV_ID_KX_A 0x157F
#define I40E_DEV_ID_KX_B 0x1580
#define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_KX_D 0x1582
#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
@@ -60,8 +58,8 @@
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
-/* Switch from mc to the 2usec global time (this is the GTIME resolution) */
-#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
/* forward declaration */
struct i40e_hw;
@@ -167,6 +165,9 @@ struct i40e_link_status {
u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
};
struct i40e_phy_info {
@@ -409,6 +410,7 @@ struct i40e_driver_version {
u8 minor_version;
u8 build_version;
u8 subbuild_version;
+ u8 driver_string[32];
};
/* RX Descriptors */
@@ -488,9 +490,6 @@ union i40e_32byte_rx_desc {
} wb; /* writeback */
};
-#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
-
enum i40e_rx_desc_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_STATUS_DD_SHIFT = 0,
@@ -507,9 +506,14 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+ << I40E_RXD_QW1_STATUS_SHIFT)
+
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
@@ -537,7 +541,8 @@ enum i40e_rx_desc_error_bits {
I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
- I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
};
enum i40e_rx_desc_error_l3l4e_fcoe_masks {
@@ -658,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
- I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -862,18 +866,14 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-28 are reserved for future use */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ /* Note: Values 0-30 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ /* Note: Value 32 is reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ /* Note: Values 37-40 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
@@ -955,6 +955,16 @@ struct i40e_vsi_context {
struct i40e_aqc_vsi_properties_data info;
};
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
/* Statistics collected by each port, VSI, VEB, and S-channel */
struct i40e_eth_stats {
u64 rx_bytes; /* gorc */
@@ -962,8 +972,6 @@ struct i40e_eth_stats {
u64 rx_multicast; /* mprc */
u64 rx_broadcast; /* bprc */
u64 rx_discards; /* rdpc */
- u64 rx_errors; /* repc */
- u64 rx_missed; /* rmpc */
u64 rx_unknown_protocol; /* rupp */
u64 tx_bytes; /* gotc */
u64 tx_unicast; /* uptc */
@@ -1015,9 +1023,12 @@ struct i40e_hw_port_stats {
u64 tx_size_big; /* ptc9522 */
u64 mac_short_packet_dropped; /* mspdc */
u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
/* EEE LPI */
- bool tx_lpi_status;
- bool rx_lpi_status;
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
u64 tx_lpi_count; /* etlpic */
u64 rx_lpi_count; /* erlpic */
};
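
The QW1 status mask above is now derived from the enum's sentinel entry rather than a hard-coded 0x7FFF, so any newly defined status bit widens the mask automatically. A standalone sketch of the bit math (the defines mirror the patch's values; the harness is illustrative):

    #include <stdio.h>

    /* mirror of the enum: UDP_0 is bit 18, so the sentinel LAST is 19 */
    #define I40E_RX_DESC_STATUS_LAST        19
    #define I40E_RXD_QW1_STATUS_SHIFT       0
    #define I40E_RXD_QW1_STATUS_MASK        (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
                                             << I40E_RXD_QW1_STATUS_SHIFT)

    int main(void)
    {
            /* 19 status bits -> 0x7FFFF; the old literal 0x7FFF covered 15 */
            printf("QW1 status mask = 0x%X\n",
                   (unsigned int)I40E_RXD_QW1_STATUS_MASK);
            return 0;
    }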
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
index 22a1b69..70951d2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl.h
@@ -341,10 +341,6 @@ struct i40e_virtchnl_pf_event {
int severity;
};
-/* The following are TBD, not necessary for LAN functionality.
- * I40E_VIRTCHNL_OP_FCOE
- */
-
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 02c11a7..f5b9d20 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -29,6 +29,24 @@
/***********************misc routines*****************************/
/**
+ * i40e_vc_disable_vf
+ * @pf: pointer to the pf info
+ * @vf: pointer to the vf info
+ *
+ * Disable the VF through a SW reset
+ **/
+static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ reg = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id));
+ reg |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_id), reg);
+ i40e_flush(hw);
+}
+
+/**
* i40e_vc_isvalid_vsi_id
* @vf: pointer to the vf info
* @vsi_id: vf relative vsi id
@@ -230,9 +248,8 @@ static int i40e_config_vsi_tx_queue(struct i40e_vf *vf, u16 vsi_idx,
tx_ctx.qlen = info->ring_len;
tx_ctx.rdylist = le16_to_cpu(pf->vsi[vsi_idx]->info.qs_handle[0]);
tx_ctx.rdylist_act = 0;
- tx_ctx.head_wb_ena = 1;
- tx_ctx.head_wb_addr = info->dma_ring_addr +
- (info->ring_len * sizeof(struct i40e_tx_desc));
+ tx_ctx.head_wb_ena = info->headwb_enabled;
+ tx_ctx.head_wb_addr = info->dma_headwb_addr;
/* clear the context in the HMC */
ret = i40e_clear_lan_tx_queue_context(hw, pf_queue_id);
@@ -336,6 +353,7 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
rx_ctx.tphhead_ena = 1;
rx_ctx.lrxqthresh = 2;
rx_ctx.crcstrip = 1;
+ rx_ctx.prefena = 1;
/* clear the context in the HMC */
ret = i40e_clear_lan_rx_queue_context(hw, pf_queue_id);
@@ -416,6 +434,15 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
if (ret)
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
+ /* Set VF bandwidth if specified */
+ if (vf->tx_rate) {
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+ vf->tx_rate / 50, 0, NULL);
+ if (ret)
+ dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n",
+ vf->vf_id, ret);
+ }
+
error_alloc_vsi_res:
return ret;
}
@@ -815,6 +842,10 @@ void i40e_free_vfs(struct i40e_pf *pf)
kfree(pf->vf);
pf->vf = NULL;
+ /* This check is for when the driver is unloaded while VFs are
+ * assigned. Setting the number of VFs to 0 through sysfs is caught
+ * before this function ever gets called.
+ */
if (!i40e_vfs_are_assigned(pf)) {
pci_disable_sriov(pf->pdev);
/* Acknowledge VFLR for all VFS. Without this, VFs will fail to
@@ -867,6 +898,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
ret = -ENOMEM;
goto err_alloc;
}
+ pf->vf = vfs;
/* apply default profile */
for (i = 0; i < num_alloc_vfs; i++) {
@@ -876,13 +908,13 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* assign default capabilities */
set_bit(I40E_VIRTCHNL_VF_CAP_L2, &vfs[i].vf_caps);
+ vfs[i].spoofchk = true;
/* vf resources get allocated during reset */
i40e_reset_vf(&vfs[i], false);
/* enable vf vplan_qtable mappings */
i40e_enable_vf_mappings(&vfs[i]);
}
- pf->vf = vfs;
pf->num_alloc_vfs = num_alloc_vfs;
i40e_enable_pf_switch_lb(pf);
@@ -951,7 +983,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
if (num_vfs)
return i40e_pci_sriov_enable(pdev, num_vfs);
- i40e_free_vfs(pf);
+ if (!i40e_vfs_are_assigned(pf)) {
+ i40e_free_vfs(pf);
+ } else {
+ dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
+ return -EINVAL;
+ }
return 0;
}
@@ -2022,16 +2059,14 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
/* delete the temporary mac address */
- i40e_del_filter(vsi, vf->default_lan_addr.addr, 0, true, false);
+ i40e_del_filter(vsi, vf->default_lan_addr.addr, vf->port_vlan_id,
+ true, false);
- /* add the new mac address */
- f = i40e_add_filter(vsi, mac, 0, true, false);
- if (!f) {
- dev_err(&pf->pdev->dev,
- "Unable to add VF ucast filter\n");
- ret = -ENOMEM;
- goto error_param;
- }
+ /* Delete all the filters for this VSI - we're going to kill it
+ * anyway.
+ */
+ list_for_each_entry(f, &vsi->mac_filter_list, list)
+ i40e_del_filter(vsi, f->macaddr, f->vlan, true, false);
dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
/* program mac filter */
@@ -2040,7 +2075,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
ret = -EIO;
goto error_param;
}
- memcpy(vf->default_lan_addr.addr, mac, ETH_ALEN);
+ ether_addr_copy(vf->default_lan_addr.addr, mac);
vf->pf_set_mac = true;
dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
ret = 0;
@@ -2088,18 +2123,28 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
goto error_pvid;
}
- if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi))
+ if (vsi->info.pvid == 0 && i40e_is_vsi_in_vlan(vsi)) {
dev_err(&pf->pdev->dev,
"VF %d has already configured VLAN filters and the administrator is requesting a port VLAN override.\nPlease unload and reload the VF driver for this change to take effect.\n",
vf_id);
+ /* Administrator error - knock the VF offline until the
+ * administrator reconfigures the network correctly and then
+ * reloads the VF driver.
+ */
+ i40e_vc_disable_vf(pf, vf);
+ }
/* Check for condition where there was already a port VLAN ID
* filter set and now it is being deleted by setting it to zero.
+ * Additionally check for the condition where there was a port
+ * VLAN but now there is a new and different port VLAN being set.
* Before deleting all the old VLAN filters we must add new ones
* with -1 (I40E_VLAN_ANY) or otherwise we're left with all our
* MAC addresses deleted.
*/
- if (!(vlan_id || qos) && vsi->info.pvid)
+ if ((!(vlan_id || qos) ||
+ (vlan_id | qos) != le16_to_cpu(vsi->info.pvid)) &&
+ vsi->info.pvid)
ret = i40e_vsi_add_vlan(vsi, I40E_VLAN_ANY);
if (vsi->info.pvid) {
@@ -2150,6 +2195,8 @@ error_pvid:
return ret;
}
+#define I40E_BW_CREDIT_DIVISOR 50 /* 50Mbps per BW credit */
+#define I40E_MAX_BW_INACTIVE_ACCUM 4 /* device can accumulate 4 credits max */
/**
* i40e_ndo_set_vf_bw
* @netdev: network interface device structure
@@ -2158,9 +2205,76 @@ error_pvid:
*
* configure vf tx rate
**/
-int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate)
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate)
{
- return -EOPNOTSUPP;
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_pf *pf = np->vsi->back;
+ struct i40e_vsi *vsi;
+ struct i40e_vf *vf;
+ int speed = 0;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if (min_tx_rate) {
+ dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for vf %d.\n",
+ min_tx_rate, vf_id);
+ return -EINVAL;
+ }
+
+ vf = &(pf->vf[vf_id]);
+ vsi = pf->vsi[vf->lan_vsi_index];
+ if (!test_bit(I40E_VF_STAT_INIT, &vf->vf_states)) {
+ dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ switch (pf->hw.phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_40GB:
+ speed = 40000;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ speed = 10000;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ speed = 1000;
+ break;
+ default:
+ break;
+ }
+
+ if (max_tx_rate > speed) {
+ dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for vf %d.",
+ max_tx_rate, vf->vf_id);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ if ((max_tx_rate < 50) && (max_tx_rate > 0)) {
+ dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n");
+ max_tx_rate = 50;
+ }
+
+ /* Tx rate credits are in values of 50Mbps, 0 is disabled */
+ ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid,
+ max_tx_rate / I40E_BW_CREDIT_DIVISOR,
+ I40E_MAX_BW_INACTIVE_ACCUM, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n",
+ ret);
+ ret = -EIO;
+ goto error;
+ }
+ vf->tx_rate = max_tx_rate;
+error:
+ return ret;
}
/**
@@ -2200,10 +2314,18 @@ int i40e_ndo_get_vf_config(struct net_device *netdev,
memcpy(&ivi->mac, vf->default_lan_addr.addr, ETH_ALEN);
- ivi->tx_rate = 0;
+ ivi->max_tx_rate = vf->tx_rate;
+ ivi->min_tx_rate = 0;
ivi->vlan = le16_to_cpu(vsi->info.pvid) & I40E_VLAN_MASK;
ivi->qos = (le16_to_cpu(vsi->info.pvid) & I40E_PRIORITY_MASK) >>
I40E_VLAN_PRIORITY_SHIFT;
+ if (vf->link_forced == false)
+ ivi->linkstate = IFLA_VF_LINK_STATE_AUTO;
+ else if (vf->link_up == true)
+ ivi->linkstate = IFLA_VF_LINK_STATE_ENABLE;
+ else
+ ivi->linkstate = IFLA_VF_LINK_STATE_DISABLE;
+ ivi->spoofchk = vf->spoofchk;
ret = 0;
error_param:
@@ -2270,3 +2392,50 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
error_out:
return ret;
}
+
+/**
+ * i40e_ndo_set_vf_spoofchk
+ * @netdev: network interface device structure
+ * @vf_id: vf identifier
+ * @enable: flag to enable or disable feature
+ *
+ * Enable or disable VF spoof checking
+ **/
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable)
+{
+ struct i40e_netdev_priv *np = netdev_priv(netdev);
+ struct i40e_vsi *vsi = np->vsi;
+ struct i40e_pf *pf = vsi->back;
+ struct i40e_vsi_context ctxt;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_vf *vf;
+ int ret = 0;
+
+ /* validate the request */
+ if (vf_id >= pf->num_alloc_vfs) {
+ dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ vf = &(pf->vf[vf_id]);
+
+ if (enable == vf->spoofchk)
+ goto out;
+
+ vf->spoofchk = enable;
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = pf->vsi[vf->lan_vsi_index]->seid;
+ ctxt.pf_num = pf->hw.pf_id;
+ ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ if (enable)
+ ctxt.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+ ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (ret) {
+ dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n",
+ ret);
+ ret = -EIO;
+ }
+out:
+ return ret;
+}
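
The same pattern applies to spoof checking, assuming the standard ndo_set_vf_spoofchk hook; from userspace this is exercised with "ip link set <pf> vf <n> spoofchk on|off" in newer iproute2.

	/* Hypothetical wiring (assumed, mirroring the rate hook above): */
	static const struct net_device_ops i40e_netdev_ops = {
		/* ... */
		.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
	};
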
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index 389c47f..63e7e0d 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -98,8 +98,10 @@ struct i40e_vf {
unsigned long vf_caps; /* vf's adv. capabilities */
unsigned long vf_states; /* vf's runtime states */
+ unsigned int tx_rate; /* Tx bandwidth limit in Mbps */
bool link_forced;
bool link_up; /* only valid if vf link is forced */
+ bool spoofchk;
};
void i40e_free_vfs(struct i40e_pf *pf);
@@ -115,10 +117,12 @@ void i40e_vc_notify_vf_reset(struct i40e_vf *vf);
int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac);
int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
int vf_id, u16 vlan_id, u8 qos);
-int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int tx_rate);
+int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
+ int max_tx_rate);
int i40e_ndo_get_vf_config(struct net_device *netdev,
int vf_id, struct ifla_vf_info *ivi);
int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link);
+int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable);
void i40e_vc_notify_link_state(struct i40e_pf *pf);
void i40e_vc_notify_reset(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/Makefile b/drivers/net/ethernet/intel/i40evf/Makefile
index e09be37..3a42383 100644
--- a/drivers/net/ethernet/intel/i40evf/Makefile
+++ b/drivers/net/ethernet/intel/i40evf/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
-# Copyright(c) 2013 Intel Corporation.
+# Copyright(c) 2013 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
#
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
index 5470ce9..eb67cce 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -28,6 +31,16 @@
#include "i40e_prototype.h"
/**
+ * i40e_is_nvm_update_op - return true if this is an NVM update operation
+ * @desc: API request descriptor
+ **/
+static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
+{
+ return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
+ (desc->opcode == i40e_aqc_opc_nvm_update);
+}
+
+/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -276,8 +289,11 @@ static void i40e_free_asq_bufs(struct i40e_hw *hw)
*
* Configure base address and length registers for the transmit queue
**/
-static void i40e_config_asq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the transmit queue */
wr32(hw, I40E_VF_ATQBAH1,
@@ -286,6 +302,7 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
I40E_VF_ATQLEN1_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ATQBAL1);
} else {
/* configure the transmit queue */
wr32(hw, I40E_PF_ATQBAH,
@@ -294,7 +311,14 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.asq.desc_buf.pa));
wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
I40E_PF_ATQLEN_ATQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ATQBAL);
}
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
/**
@@ -303,8 +327,11 @@ static void i40e_config_asq_regs(struct i40e_hw *hw)
*
* Configure base address and length registers for the receive (event queue)
**/
-static void i40e_config_arq_regs(struct i40e_hw *hw)
+static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
{
+ i40e_status ret_code = 0;
+ u32 reg = 0;
+
if (hw->mac.type == I40E_MAC_VF) {
/* configure the receive queue */
wr32(hw, I40E_VF_ARQBAH1,
@@ -313,6 +340,7 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
I40E_VF_ARQLEN1_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_VF_ARQBAL1);
} else {
/* configure the receive queue */
wr32(hw, I40E_PF_ARQBAH,
@@ -321,10 +349,17 @@ static void i40e_config_arq_regs(struct i40e_hw *hw)
lower_32_bits(hw->aq.arq.desc_buf.pa));
wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
I40E_PF_ARQLEN_ARQENABLE_MASK));
+ reg = rd32(hw, I40E_PF_ARQBAL);
}
/* Update tail in the HW to post pre-allocated buffers */
wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
+
+ /* Check one register to verify that config was applied */
+ if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
+ ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
+
+ return ret_code;
}
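
Both queue-setup helpers above are converted from void to status-returning functions that read one base-address register back to confirm the write took effect, catching a dead or hung device early. The essence of the pattern, with a hypothetical helper name (the real code compares only the low address word):

	/* Hypothetical distillation of the readback check added above. */
	static i40e_status i40e_write_verify(struct i40e_hw *hw, u32 reg, u32 val)
	{
		wr32(hw, reg, val);
		if (rd32(hw, reg) != val)	/* write did not stick */
			return I40E_ERR_ADMIN_QUEUE_ERROR;
		return 0;
	}
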
/**
@@ -372,7 +407,9 @@ static i40e_status i40e_init_asq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_asq_regs(hw);
+ ret_code = i40e_config_asq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -429,7 +466,9 @@ static i40e_status i40e_init_arq(struct i40e_hw *hw)
goto init_adminq_free_rings;
/* initialize base registers */
- i40e_config_arq_regs(hw);
+ ret_code = i40e_config_arq_regs(hw);
+ if (ret_code)
+ goto init_adminq_free_rings;
/* success! */
goto init_adminq_exit;
@@ -659,6 +698,12 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
goto asq_send_command_exit;
}
+ if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
+ i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
+ status = I40E_ERR_NVM;
+ goto asq_send_command_exit;
+ }
+
details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
if (cmd_details) {
*details = *cmd_details;
@@ -786,6 +831,9 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
+ if (i40e_is_nvm_update_op(desc))
+ hw->aq.nvm_busy = true;
+
/* update the error if time out occurred */
if ((!cmd_completed) &&
(!details->async && !details->postpone)) {
@@ -880,6 +928,9 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
e->msg_size);
}
+ if (i40e_is_nvm_update_op(&e->desc))
+ hw->aq.nvm_busy = false;
+
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message
* size
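
Taken together, the send-path hunks above (reject when busy, then mark busy) and the receive-clean hunk (clear on completion) implement a single-outstanding-command rule for NVM erase/update; a condensed, hypothetical distillation:

	/* Condensed sketch of the nvm_busy handshake (not literal code):
	 * at most one NVM erase/update command may be in flight.
	 */
	if (i40e_is_nvm_update_op(desc)) {
		if (hw->aq.nvm_busy)
			return I40E_ERR_NVM;	/* caller must retry later */
		hw->aq.nvm_busy = true;		/* set once the command is posted */
	}
	/* ...and when the completion event is reaped from the ARQ: */
	if (i40e_is_nvm_update_op(&e->desc))
		hw->aq.nvm_busy = false;
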
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
index 8f72c31d..e3472c6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -87,6 +90,7 @@ struct i40e_adminq_info {
u16 fw_min_ver; /* firmware minor version */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
+ bool nvm_busy;
struct mutex asq_mutex; /* Send queue lock */
struct mutex arq_mutex; /* Receive queue lock */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index 97662b6..e656ea7 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -31,7 +34,7 @@
*/
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR 0x0001
+#define I40E_FW_API_VERSION_MINOR 0x0002
#define I40E_FW_API_VERSION_A0_MINOR 0x0000
struct i40e_aq_desc {
@@ -121,6 +124,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_version = 0x0001,
i40e_aqc_opc_driver_version = 0x0002,
i40e_aqc_opc_queue_shutdown = 0x0003,
+ i40e_aqc_opc_set_pf_context = 0x0004,
/* resource ownership */
i40e_aqc_opc_request_resource = 0x0008,
@@ -180,9 +184,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
- i40e_aqc_opc_set_storm_control_config = 0x0280,
- i40e_aqc_opc_get_storm_control_config = 0x0281,
-
/* DCB commands */
i40e_aqc_opc_dcb_ignore_pfc = 0x0301,
i40e_aqc_opc_dcb_updated = 0x0302,
@@ -205,6 +206,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_query_switching_comp_bw_config = 0x041A,
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
+ i40e_aqc_opc_configure_partition_bw = 0x041D,
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
@@ -222,13 +224,15 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_partner_advt = 0x0616,
i40e_aqc_opc_set_lb_modes = 0x0618,
i40e_aqc_opc_get_phy_wol_caps = 0x0621,
- i40e_aqc_opc_set_phy_reset = 0x0622,
+ i40e_aqc_opc_set_phy_debug = 0x0622,
i40e_aqc_opc_upload_ext_phy_fm = 0x0625,
/* NVM commands */
- i40e_aqc_opc_nvm_read = 0x0701,
- i40e_aqc_opc_nvm_erase = 0x0702,
- i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_read = 0x0701,
+ i40e_aqc_opc_nvm_erase = 0x0702,
+ i40e_aqc_opc_nvm_update = 0x0703,
+ i40e_aqc_opc_nvm_config_read = 0x0704,
+ i40e_aqc_opc_nvm_config_write = 0x0705,
/* virtualization commands */
i40e_aqc_opc_send_msg_to_pf = 0x0801,
@@ -270,8 +274,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_debug_set_mode = 0xFF01,
i40e_aqc_opc_debug_read_reg = 0xFF03,
i40e_aqc_opc_debug_write_reg = 0xFF04,
- i40e_aqc_opc_debug_read_reg_sg = 0xFF05,
- i40e_aqc_opc_debug_write_reg_sg = 0xFF06,
i40e_aqc_opc_debug_modify_reg = 0xFF07,
i40e_aqc_opc_debug_dump_internals = 0xFF08,
i40e_aqc_opc_debug_modify_internals = 0xFF09,
@@ -339,6 +341,14 @@ struct i40e_aqc_queue_shutdown {
I40E_CHECK_CMD_LENGTH(i40e_aqc_queue_shutdown);
+/* Set PF context (0x0004, direct) */
+struct i40e_aqc_set_pf_context {
+ u8 pf_id;
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_pf_context);
+
/* Request resource ownership (direct 0x0008)
* Release resource ownership (direct 0x0009)
*/
@@ -678,7 +688,6 @@ struct i40e_aqc_add_get_update_vsi {
#define I40E_AQ_VSI_TYPE_PF 0x2
#define I40E_AQ_VSI_TYPE_EMP_MNG 0x3
#define I40E_AQ_VSI_FLAG_CASCADED_PV 0x4
-#define I40E_AQ_VSI_FLAG_CLOUD_VSI 0x8
__le32 addr_high;
__le32 addr_low;
};
@@ -1040,7 +1049,9 @@ struct i40e_aqc_set_vsi_promiscuous_modes {
#define I40E_AQC_SET_VSI_PROMISC_VLAN 0x10
__le16 seid;
#define I40E_AQC_VSI_PROM_CMD_SEID_MASK 0x3FF
- u8 reserved[10];
+ __le16 vlan_tag;
+#define I40E_AQC_SET_VSI_VLAN_VALID 0x8000
+ u8 reserved[8];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_vsi_promiscuous_modes);
@@ -1289,27 +1300,6 @@ struct i40e_aqc_add_delete_mirror_rule_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_delete_mirror_rule_completion);
-/* Set Storm Control Configuration (direct 0x0280)
- * Get Storm Control Configuration (direct 0x0281)
- * the command and response use the same descriptor structure
- */
-struct i40e_aqc_set_get_storm_control_config {
- __le32 broadcast_threshold;
- __le32 multicast_threshold;
- __le32 control_flags;
-#define I40E_AQC_STORM_CONTROL_MDIPW 0x01
-#define I40E_AQC_STORM_CONTROL_MDICW 0x02
-#define I40E_AQC_STORM_CONTROL_BDIPW 0x04
-#define I40E_AQC_STORM_CONTROL_BDICW 0x08
-#define I40E_AQC_STORM_CONTROL_BIDU 0x10
-#define I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT 8
-#define I40E_AQC_STORM_CONTROL_INTERVAL_MASK (0x3FF << \
- I40E_AQC_STORM_CONTROL_INTERVAL_SHIFT)
- u8 reserved[4];
-};
-
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_get_storm_control_config);
-
/* DCB 0x03xx*/
/* PFC Ignore (direct 0x0301)
@@ -1427,11 +1417,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_configure_switching_comp_bw_limit);
struct i40e_aqc_configure_switching_comp_ets_data {
u8 reserved[4];
u8 tc_valid_bits;
- u8 reserved1;
+ u8 seepage;
+#define I40E_AQ_ETS_SEEPAGE_EN_MASK 0x1
u8 tc_strict_priority_flags;
- u8 reserved2[17];
+ u8 reserved1[17];
u8 tc_bw_share_credits[8];
- u8 reserved3[96];
+ u8 reserved2[96];
};
/* Configure Switching Component Bandwidth Limits per Tc (indirect 0x0416) */
@@ -1499,6 +1490,15 @@ struct i40e_aqc_query_switching_comp_bw_config_resp {
* (direct 0x041B and 0x041C) uses the generic SEID struct
*/
+/* Configure partition BW
+ * (indirect 0x041D)
+ */
+struct i40e_aqc_configure_partition_bw_data {
+ __le16 pf_valid_bits;
+ u8 min_bw[16]; /* guaranteed bandwidth */
+ u8 max_bw[16]; /* bandwidth limit */
+};
+
/* Get and set the active HMC resource profile and status.
* (direct 0x0500) and (direct 0x0501)
*/
@@ -1539,6 +1539,8 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_XLPPI = 0x9,
I40E_PHY_TYPE_40GBASE_CR4_CU = 0xA,
I40E_PHY_TYPE_10GBASE_CR1_CU = 0xB,
+ I40E_PHY_TYPE_10GBASE_AOC = 0xC,
+ I40E_PHY_TYPE_40GBASE_AOC = 0xD,
I40E_PHY_TYPE_100BASE_TX = 0x11,
I40E_PHY_TYPE_1000BASE_T = 0x12,
I40E_PHY_TYPE_10GBASE_T = 0x13,
@@ -1549,7 +1551,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_40GBASE_CR4 = 0x18,
I40E_PHY_TYPE_40GBASE_SR4 = 0x19,
I40E_PHY_TYPE_40GBASE_LR4 = 0x1A,
- I40E_PHY_TYPE_20GBASE_KR2 = 0x1B,
+ I40E_PHY_TYPE_1000BASE_SX = 0x1B,
+ I40E_PHY_TYPE_1000BASE_LX = 0x1C,
+ I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
+ I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
I40E_PHY_TYPE_MAX
};
@@ -1583,11 +1588,8 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_FLAG_PAUSE_TX 0x01
#define I40E_AQ_PHY_FLAG_PAUSE_RX 0x02
#define I40E_AQ_PHY_FLAG_LOW_POWER 0x04
-#define I40E_AQ_PHY_FLAG_AN_SHIFT 3
-#define I40E_AQ_PHY_FLAG_AN_MASK (0x3 << I40E_AQ_PHY_FLAG_AN_SHIFT)
-#define I40E_AQ_PHY_FLAG_AN_OFF 0x00 /* link forced on */
-#define I40E_AQ_PHY_FLAG_AN_OFF_LINK_DOWN 0x01
-#define I40E_AQ_PHY_FLAG_AN_ON 0x02
+#define I40E_AQ_PHY_LINK_ENABLED 0x08
+#define I40E_AQ_PHY_AN_ENABLED 0x10
#define I40E_AQ_PHY_FLAG_MODULE_QUAL 0x20
__le16 eee_capability;
#define I40E_AQ_EEE_100BASE_TX 0x0002
@@ -1696,6 +1698,7 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_ACTIVE 0x00
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
+#define I40E_AQ_LINK_FORCED_40G 0x10
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
@@ -1747,14 +1750,21 @@ struct i40e_aqc_set_lb_mode {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_lb_mode);
-/* Set PHY Reset command (0x0622) */
-struct i40e_aqc_set_phy_reset {
- u8 reset_flags;
-#define I40E_AQ_PHY_RESET_REQUEST 0x02
+/* Set PHY Debug command (0x0622) */
+struct i40e_aqc_set_phy_debug {
+ u8 command_flags;
+#define I40E_AQ_PHY_DEBUG_RESET_INTERNAL 0x02
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT 2
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_MASK (0x03 << \
+ I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SHIFT)
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
+#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
u8 reserved[15];
};
-I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_reset);
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_phy_debug);
enum i40e_aq_phy_reg_type {
I40E_AQC_PHY_REG_INTERNAL = 0x1,
@@ -1779,6 +1789,47 @@ struct i40e_aqc_nvm_update {
I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_update);
+/* NVM Config Read (indirect 0x0704) */
+struct i40e_aqc_nvm_config_read {
+ __le16 cmd_flags;
+#define ANVM_SINGLE_OR_MULTIPLE_FEATURES_MASK 1
+#define ANVM_READ_SINGLE_FEATURE 0
+#define ANVM_READ_MULTIPLE_FEATURES 1
+ __le16 element_count;
+ __le16 element_id; /* Feature/field ID */
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_read);
+
+/* NVM Config Write (indirect 0x0705) */
+struct i40e_aqc_nvm_config_write {
+ __le16 cmd_flags;
+ __le16 element_count;
+ u8 reserved[4];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write);
+
+struct i40e_aqc_nvm_config_data_feature {
+ __le16 feature_id;
+ __le16 instance_id;
+ __le16 feature_options;
+ __le16 feature_selection;
+};
+
+struct i40e_aqc_nvm_config_data_immediate_field {
+#define ANVM_FEATURE_OR_IMMEDIATE_MASK 0x2
+ __le16 field_id;
+ __le16 instance_id;
+ __le16 field_options;
+ __le16 field_value;
+};
+
/* Send to PF command (indirect 0x0801) id is only used by PF
* Send to VF command (indirect 0x0802) id is only used by PF
* Send to Peer PF command (indirect 0x0803)
@@ -1948,19 +1999,12 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_start);
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
- u8 header_len; /* in DWords, 1 to 15 */
+ u8 reserved0[3];
u8 protocol_type;
-#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x0
-#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x2
-#define I40E_AQC_TUNNEL_TYPE_NGE 0x3
- u8 variable_udp_length;
-#define I40E_AQC_TUNNEL_FIXED_UDP_LENGTH 0x0
-#define I40E_AQC_TUNNEL_VARIABLE_UDP_LENGTH 0x1
- u8 udp_key_index;
-#define I40E_AQC_TUNNEL_KEY_INDEX_VXLAN 0x0
-#define I40E_AQC_TUNNEL_KEY_INDEX_NGE 0x1
-#define I40E_AQC_TUNNEL_KEY_INDEX_PROPRIETARY_UDP 0x2
- u8 reserved[10];
+#define I40E_AQC_TUNNEL_TYPE_VXLAN 0x00
+#define I40E_AQC_TUNNEL_TYPE_NGE 0x01
+#define I40E_AQC_TUNNEL_TYPE_TEREDO 0x10
+ u8 reserved1[10];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
index d8654fb..8e6a6dd 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_alloc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_common.c b/drivers/net/ethernet/intel/i40evf/i40e_common.c
index ae08437..a43155a 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_common.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -40,12 +43,10 @@ i40e_status i40e_set_mac_type(struct i40e_hw *hw)
if (hw->vendor_id == PCI_VENDOR_ID_INTEL) {
switch (hw->device_id) {
case I40E_DEV_ID_SFP_XL710:
- case I40E_DEV_ID_SFP_X710:
case I40E_DEV_ID_QEMU:
case I40E_DEV_ID_KX_A:
case I40E_DEV_ID_KX_B:
case I40E_DEV_ID_KX_C:
- case I40E_DEV_ID_KX_D:
case I40E_DEV_ID_QSFP_A:
case I40E_DEV_ID_QSFP_B:
case I40E_DEV_ID_QSFP_C:
@@ -130,7 +131,11 @@ void i40evf_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
**/
bool i40evf_check_asq_alive(struct i40e_hw *hw)
{
- return !!(rd32(hw, hw->aq.asq.len) & I40E_PF_ATQLEN_ATQENABLE_MASK);
+ if (hw->aq.asq.len)
+ return !!(rd32(hw, hw->aq.asq.len) &
+ I40E_PF_ATQLEN_ATQENABLE_MASK);
+ else
+ return false;
}
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
index cb97b3e..a2ad9a4 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -160,11 +163,6 @@ struct i40e_hmc_info {
(((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-#define I40E_INVALIDATE_VF_HMC_PD(hw, sd_idx, pd_idx, hmc_fn_id) \
- wr32((hw), I40E_GLHMC_VFPDINV((hmc_fn_id) - I40E_FIRST_VF_FPM_ID), \
- (((sd_idx) << I40E_PFHMC_PDINV_PMSDIDX_SHIFT) | \
- ((pd_idx) << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)))
-
/**
* I40E_FIND_SD_INDEX_LIMIT - finds segment descriptor index limit
* @hmc_info: pointer to the HMC configuration information structure
@@ -223,7 +221,7 @@ i40e_status i40e_add_pd_table_entry(struct i40e_hw *hw,
u32 pd_index);
i40e_status i40e_remove_pd_bp(struct i40e_hw *hw,
struct i40e_hmc_info *hmc_info,
- u32 idx, bool is_pf);
+ u32 idx);
i40e_status i40e_prep_remove_sd_bp(struct i40e_hmc_info *hmc_info,
u32 idx);
i40e_status i40e_remove_sd_bp_new(struct i40e_hw *hw,
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
index 17e42ca..d6f7622 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -53,6 +56,7 @@ struct i40e_hmc_obj_rxq {
u8 tphdata_ena;
u8 tphhead_ena;
u8 lrxqthresh;
+ u8 prefena; /* NOTE: normally must be set to 1 at init */
};
/* Tx queue context data */
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
index 622f373..21a91b1 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_osdep.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
index 97ab8c2..849edcc 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_prototype.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_register.h b/drivers/net/ethernet/intel/i40evf/i40e_register.h
index 30af953..3698396 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_register.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_register.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -1337,8 +1340,6 @@
#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
#define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
@@ -1364,8 +1365,6 @@
#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
#define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
-#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
#define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
@@ -1586,6 +1585,14 @@
#define I40E_GLLAN_TSOMSK_M 0x000442DC
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+
#define I40E_PFLAN_QALLOC 0x001C0400
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_status.h b/drivers/net/ethernet/intel/i40evf/i40e_status.h
index 7c08cc2..7fa7a41 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_status.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_status.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index b9f50f4..48ebb6c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -725,10 +728,12 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
u32 rx_error,
u16 rx_ptype)
{
+ struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(rx_ptype);
+ bool ipv4 = false, ipv6 = false;
bool ipv4_tunnel, ipv6_tunnel;
__wsum rx_udp_csum;
- __sum16 csum;
struct iphdr *iph;
+ __sum16 csum;
ipv4_tunnel = (rx_ptype > I40E_RX_PTYPE_GRENAT4_MAC_PAY3) &&
(rx_ptype < I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4);
@@ -739,29 +744,57 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->ip_summed = CHECKSUM_NONE;
/* Rx csum enabled and ip headers found? */
- if (!(vsi->netdev->features & NETIF_F_RXCSUM &&
- rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ if (!(vsi->netdev->features & NETIF_F_RXCSUM))
return;
+ /* did the hardware decode the packet and checksum? */
+ if (!(rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
+ return;
+
+ /* both known and outer_ip must be set for the below code to work */
+ if (!(decoded.known && decoded.outer_ip))
+ return;
+
+ if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4)
+ ipv4 = true;
+ else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP &&
+ decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6)
+ ipv6 = true;
+
+ if (ipv4 &&
+ (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
+ (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))))
+ goto checksum_fail;
+
/* likely incorrect csum if alternate IP extension headers found */
- if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ if (ipv6 &&
+ decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
+ rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
+ rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
+ /* don't increment checksum err here, non-fatal err */
return;
- /* IP or L4 or outmost IP checksum error */
- if (rx_error & ((1 << I40E_RX_DESC_ERROR_IPE_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) |
- (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) {
- vsi->back->hw_csum_rx_error++;
+ /* there was some L4 error, count error and punt packet to the stack */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))
+ goto checksum_fail;
+
+	/* Handle packets that could not be checksummed due to arrival
+	 * speed; in this case the stack can compute the csum.
+	 */
+ if (rx_error & (1 << I40E_RX_DESC_ERROR_PPRS_SHIFT))
return;
- }
+	/* If VXLAN traffic has an outer UDPv4 checksum we need to check
+	 * it in the driver; hardware does not do it for us.
+	 * Since the L3L4P bit was set we assume a valid IHL value (>= 5),
+	 * so the total IPv4 header length is IHL * 4 bytes.
+	 * The UDP_0 bit *may* be set if the *inner* header is UDP.
+	 */
if (ipv4_tunnel &&
+ (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
!(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
- /* If VXLAN traffic has an outer UDPv4 checksum we need to check
- * it in the driver, hardware does not do it for us.
- * Since L3L4P bit was set we assume a valid IHL value (>=5)
- * so the total length of IPv4 header is IHL*4 bytes
- */
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -778,13 +811,16 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
(skb->len - skb_transport_offset(skb)),
IPPROTO_UDP, rx_udp_csum);
- if (udp_hdr(skb)->check != csum) {
- vsi->back->hw_csum_rx_error++;
- return;
- }
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return;
+
+checksum_fail:
+ vsi->back->hw_csum_rx_error++;
}
/**
@@ -953,6 +989,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
/* ERR_MASK will only have valid bits if EOP set */
if (unlikely(rx_error & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) {
dev_kfree_skb_any(skb);
+ /* TODO: shouldn't we increment a counter indicating the
+ * drop?
+ */
goto next_desc;
}
@@ -1508,9 +1547,7 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
static int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
{
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
unsigned int f;
-#endif
int count = 0;
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
@@ -1519,12 +1556,9 @@ static int i40e_xmit_descriptor_count(struct sk_buff *skb,
* + 1 desc for context descriptor,
* otherwise try next time
*/
-#if PAGE_SIZE > I40E_MAX_DATA_PER_TXD
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
-#else
- count += skb_shinfo(skb)->nr_frags;
-#endif
+
count += TXD_USE_COUNT(skb_headlen(skb));
if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
index 10bf49e..30d248b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -24,7 +27,7 @@
#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_
-/* Interrupt Throttling and Rate Limiting (storm control) Goodies */
+/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_MAX_ITR 0x0FF0 /* reg uses 2 usec resolution */
#define I40E_MIN_ITR 0x0004 /* reg uses 2 usec resolution */
@@ -66,16 +69,11 @@ enum i40e_dyn_idx_t {
/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
@@ -119,11 +117,11 @@ enum i40e_dyn_idx_t {
#define i40e_rx_desc i40e_32byte_rx_desc
#define I40E_MIN_TX_LEN 17
-#define I40E_MAX_DATA_PER_TXD 16383 /* aka 16kB - 1 */
+#define I40E_MAX_DATA_PER_TXD 8192
/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), I40E_MAX_DATA_PER_TXD)
-#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
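
The simplification is exact on 4 KiB-page systems; a worked check (the page-size assumption is the editor's, not the patch's):

	/* With the 8 KiB per-descriptor limit and PAGE_SIZE == 4096,
	 * TXD_USE_COUNT(PAGE_SIZE) == DIV_ROUND_UP(4096, 8192) == 1, so
	 * MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) collapses to
	 * MAX_SKB_FRAGS; the + 4 reserve is unchanged.
	 */
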
#define I40E_TX_FLAGS_CSUM (u32)(1)
#define I40E_TX_FLAGS_HW_VLAN (u32)(1 << 1)
@@ -180,7 +178,6 @@ enum i40e_ring_state_t {
__I40E_TX_DETECT_HANG,
__I40E_HANG_CHECK_ARMED,
__I40E_RX_PS_ENABLED,
- __I40E_RX_LRO_ENABLED,
__I40E_RX_16BYTE_DESC_ENABLED,
};
@@ -196,12 +193,6 @@ enum i40e_ring_state_t {
set_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
clear_bit(__I40E_TX_DETECT_HANG, &(ring)->state)
-#define ring_is_lro_enabled(ring) \
- test_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define set_ring_lro_enabled(ring) \
- set_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
-#define clear_ring_lro_enabled(ring) \
- clear_bit(__I40E_RX_LRO_ENABLED, &(ring)->state)
#define ring_is_16byte_desc_enabled(ring) \
test_bit(__I40E_RX_16BYTE_DESC_ENABLED, &(ring)->state)
#define set_ring_16byte_desc_enabled(ring) \
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 4673b33..d3cf5a6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -32,13 +35,11 @@
#include "i40e_lan_hmc.h"
/* Device IDs */
-#define I40E_DEV_ID_SFP_XL710 0x1572
-#define I40E_DEV_ID_SFP_X710 0x1573
+#define I40E_DEV_ID_SFP_XL710 0x1572
#define I40E_DEV_ID_QEMU 0x1574
#define I40E_DEV_ID_KX_A 0x157F
#define I40E_DEV_ID_KX_B 0x1580
#define I40E_DEV_ID_KX_C 0x1581
-#define I40E_DEV_ID_KX_D 0x1582
#define I40E_DEV_ID_QSFP_A 0x1583
#define I40E_DEV_ID_QSFP_B 0x1584
#define I40E_DEV_ID_QSFP_C 0x1585
@@ -57,8 +58,8 @@
/* Max default timeout in ms, */
#define I40E_MAX_NVM_TIMEOUT 18000
-/* Switch from mc to the 2usec global time (this is the GTIME resolution) */
-#define I40E_MS_TO_GTIME(time) (((time) * 1000) / 2)
+/* Switch from ms to the 1usec global time (this is the GTIME resolution) */
+#define I40E_MS_TO_GTIME(time) ((time) * 1000)
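
A worked conversion using the timeout defined just above:

	/* The 18000 ms max NVM timeout becomes 18000 * 1000 == 18,000,000
	 * GTIME units at the 1 usec resolution; the old (time * 1000) / 2
	 * formula assumed a 2 usec tick and would have yielded half that.
	 */
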
/* forward declaration */
struct i40e_hw;
@@ -101,15 +102,6 @@ enum i40e_debug_mask {
I40E_DEBUG_ALL = 0xFFFFFFFF
};
-/* PCI Bus Info */
-#define I40E_PCI_LINK_WIDTH_1 0x10
-#define I40E_PCI_LINK_WIDTH_2 0x20
-#define I40E_PCI_LINK_WIDTH_4 0x40
-#define I40E_PCI_LINK_WIDTH_8 0x80
-#define I40E_PCI_LINK_SPEED_2500 0x1
-#define I40E_PCI_LINK_SPEED_5000 0x2
-#define I40E_PCI_LINK_SPEED_8000 0x3
-
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
* know for sure exactly which hardware we're working with. This gives us the
@@ -173,6 +165,9 @@ struct i40e_link_status {
u8 loopback;
/* is Link Status Event notification to SW enabled */
bool lse_enable;
+ u16 max_frame_size;
+ bool crc_enable;
+ u8 pacing;
};
struct i40e_phy_info {
@@ -415,6 +410,7 @@ struct i40e_driver_version {
u8 minor_version;
u8 build_version;
u8 subbuild_version;
+ u8 driver_string[32];
};
/* RX Descriptors */
@@ -494,9 +490,6 @@ union i40e_32byte_rx_desc {
} wb; /* writeback */
};
-#define I40E_RXD_QW1_STATUS_SHIFT 0
-#define I40E_RXD_QW1_STATUS_MASK (0x7FFFUL << I40E_RXD_QW1_STATUS_SHIFT)
-
enum i40e_rx_desc_status_bits {
/* Note: These are predefined bit offsets */
I40E_RX_DESC_STATUS_DD_SHIFT = 0,
@@ -513,9 +506,14 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18
+ I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
+#define I40E_RXD_QW1_STATUS_SHIFT 0
+#define I40E_RXD_QW1_STATUS_MASK (((1 << I40E_RX_DESC_STATUS_LAST) - 1) \
+ << I40E_RXD_QW1_STATUS_SHIFT)
+
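
A short worked check of the self-sizing mask (all values come from the enum above, so the mask now tracks future status bits automatically):

	/* I40E_RX_DESC_STATUS_LAST evaluates to 19, so the mask is
	 * (1 << 19) - 1 == 0x7FFFF: bits 0..18, now including UDP_0
	 * (bit 18); the old literal 0x7FFF stopped at bit 14.
	 */
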
#define I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT I40E_RX_DESC_STATUS_TSYNINDX_SHIFT
#define I40E_RXD_QW1_STATUS_TSYNINDX_MASK (0x3UL << \
I40E_RXD_QW1_STATUS_TSYNINDX_SHIFT)
@@ -543,7 +541,8 @@ enum i40e_rx_desc_error_bits {
I40E_RX_DESC_ERROR_IPE_SHIFT = 3,
I40E_RX_DESC_ERROR_L4E_SHIFT = 4,
I40E_RX_DESC_ERROR_EIPE_SHIFT = 5,
- I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6
+ I40E_RX_DESC_ERROR_OVERSIZE_SHIFT = 6,
+ I40E_RX_DESC_ERROR_PPRS_SHIFT = 7
};
enum i40e_rx_desc_error_l3l4e_fcoe_masks {
@@ -664,7 +663,6 @@ enum i40e_rx_desc_ext_status_bits {
I40E_RX_DESC_EXT_STATUS_L2TAG3P_SHIFT = 1,
I40E_RX_DESC_EXT_STATUS_FLEXBL_SHIFT = 2, /* 2 BITS */
I40E_RX_DESC_EXT_STATUS_FLEXBH_SHIFT = 4, /* 2 BITS */
- I40E_RX_DESC_EXT_STATUS_FTYPE_SHIFT = 6, /* 3 BITS */
I40E_RX_DESC_EXT_STATUS_FDLONGB_SHIFT = 9,
I40E_RX_DESC_EXT_STATUS_FCOELONGB_SHIFT = 10,
I40E_RX_DESC_EXT_STATUS_PELONGB_SHIFT = 11,
@@ -868,18 +866,14 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-28 are reserved for future use */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
+ /* Note: Values 0-30 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN = 32,
+ /* Note: Value 32 is reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-38 are reserved for future use */
- I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
- I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
+ /* Note: Values 37-40 are reserved for future use */
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
@@ -961,6 +955,16 @@ struct i40e_vsi_context {
struct i40e_aqc_vsi_properties_data info;
};
+struct i40e_veb_context {
+ u16 seid;
+ u16 uplink_seid;
+ u16 veb_number;
+ u16 vebs_allocated;
+ u16 vebs_unallocated;
+ u16 flags;
+ struct i40e_aqc_get_veb_parameters_completion info;
+};
+
/* Statistics collected by each port, VSI, VEB, and S-channel */
struct i40e_eth_stats {
u64 rx_bytes; /* gorc */
@@ -968,8 +972,6 @@ struct i40e_eth_stats {
u64 rx_multicast; /* mprc */
u64 rx_broadcast; /* bprc */
u64 rx_discards; /* rdpc */
- u64 rx_errors; /* repc */
- u64 rx_missed; /* rmpc */
u64 rx_unknown_protocol; /* rupp */
u64 tx_bytes; /* gotc */
u64 tx_unicast; /* uptc */
@@ -1021,9 +1023,12 @@ struct i40e_hw_port_stats {
u64 tx_size_big; /* ptc9522 */
u64 mac_short_packet_dropped; /* mspdc */
u64 checksum_error; /* xec */
+ /* flow director stats */
+ u64 fd_atr_match;
+ u64 fd_sb_match;
/* EEE LPI */
- bool tx_lpi_status;
- bool rx_lpi_status;
+ u32 tx_lpi_status;
+ u32 rx_lpi_status;
u64 tx_lpi_count; /* etlpic */
u64 rx_lpi_count; /* erlpic */
};
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
index ccf45d0..cd18d56 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_virtchnl.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -338,10 +341,6 @@ struct i40e_virtchnl_pf_event {
int severity;
};
-/* The following are TBD, not necessary for LAN functionality.
- * I40E_VIRTCHNL_OP_FCOE
- */
-
/* VF reset states - these are written into the RSTAT register:
* I40E_VFGEN_RSTAT1 on the PF
* I40E_VFGEN_RSTAT on the VF
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index 807807d..30ef519 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -77,7 +80,7 @@ struct i40e_vsi {
#define I40EVF_MIN_TXD 64
#define I40EVF_MAX_RXD 4096
#define I40EVF_MIN_RXD 64
-#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 8
+#define I40EVF_REQ_DESCRIPTOR_MULTIPLE 32
/* Supported Rx Buffer Sizes */
#define I40EVF_RXBUFFER_64 64 /* Used for packet split */
@@ -193,10 +196,12 @@ struct i40evf_adapter {
struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
u32 tx_timeout_count;
struct list_head mac_filter_list;
+ u32 tx_desc_count;
/* RX */
struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
u64 hw_csum_rx_error;
+ u32 rx_desc_count;
int num_msix_vectors;
struct msix_entry *msix_entries;
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index 8b0db1c..60407a9 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -44,8 +47,6 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
I40EVF_STAT("rx_multicast", current_stats.rx_multicast),
I40EVF_STAT("rx_broadcast", current_stats.rx_broadcast),
I40EVF_STAT("rx_discards", current_stats.rx_discards),
- I40EVF_STAT("rx_errors", current_stats.rx_errors),
- I40EVF_STAT("rx_missed", current_stats.rx_missed),
I40EVF_STAT("rx_unknown_protocol", current_stats.rx_unknown_protocol),
I40EVF_STAT("tx_bytes", current_stats.tx_bytes),
I40EVF_STAT("tx_unicast", current_stats.tx_unicast),
@@ -56,10 +57,12 @@ static const struct i40evf_stats i40evf_gstrings_stats[] = {
};
#define I40EVF_GLOBAL_STATS_LEN ARRAY_SIZE(i40evf_gstrings_stats)
-#define I40EVF_QUEUE_STATS_LEN \
+#define I40EVF_QUEUE_STATS_LEN(_dev) \
(((struct i40evf_adapter *) \
- netdev_priv(netdev))->vsi_res->num_queue_pairs * 4)
-#define I40EVF_STATS_LEN (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN)
+ netdev_priv(_dev))->vsi_res->num_queue_pairs \
+ * 2 * (sizeof(struct i40e_queue_stats) / sizeof(u64)))
+#define I40EVF_STATS_LEN(_dev) \
+ (I40EVF_GLOBAL_STATS_LEN + I40EVF_QUEUE_STATS_LEN(_dev))
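
The parameterized macro reproduces the old hard-coded factor; assuming struct i40e_queue_stats carries exactly two u64 counters (packets and bytes), the arithmetic works out as follows:

	/* Hypothetical check (assumes { u64 packets; u64 bytes; }):
	 * 2 rings (Tx + Rx) * (sizeof(struct i40e_queue_stats) / sizeof(u64))
	 * == 2 * 2 == 4 stats per queue pair, matching the old "* 4".
	 */
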
/**
* i40evf_get_settings - Get Link Speed and Duplex settings
@@ -75,7 +78,7 @@ static int i40evf_get_settings(struct net_device *netdev,
/* In the future the VF will be able to query the PF for
* some information - for now use a dummy value
*/
- ecmd->supported = SUPPORTED_10000baseT_Full;
+ ecmd->supported = 0;
ecmd->autoneg = AUTONEG_DISABLE;
ecmd->transceiver = XCVR_DUMMY1;
ecmd->port = PORT_NONE;
@@ -94,9 +97,9 @@ static int i40evf_get_settings(struct net_device *netdev,
static int i40evf_get_sset_count(struct net_device *netdev, int sset)
{
if (sset == ETH_SS_STATS)
- return I40EVF_STATS_LEN;
+ return I40EVF_STATS_LEN(netdev);
else
- return -ENOTSUPP;
+ return -EINVAL;
}
/**
@@ -219,13 +222,11 @@ static void i40evf_get_ringparam(struct net_device *netdev,
struct ethtool_ringparam *ring)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
- struct i40e_ring *tx_ring = adapter->tx_rings[0];
- struct i40e_ring *rx_ring = adapter->rx_rings[0];
ring->rx_max_pending = I40EVF_MAX_RXD;
ring->tx_max_pending = I40EVF_MAX_TXD;
- ring->rx_pending = rx_ring->count;
- ring->tx_pending = tx_ring->count;
+ ring->rx_pending = adapter->rx_desc_count;
+ ring->tx_pending = adapter->tx_desc_count;
}
/**
@@ -241,7 +242,6 @@ static int i40evf_set_ringparam(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
- int i;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
@@ -257,17 +257,16 @@ static int i40evf_set_ringparam(struct net_device *netdev,
new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
- if ((new_tx_count == adapter->tx_rings[0]->count) &&
- (new_rx_count == adapter->rx_rings[0]->count))
+ if ((new_tx_count == adapter->tx_desc_count) &&
+ (new_rx_count == adapter->rx_desc_count))
return 0;
- for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
- adapter->tx_rings[0]->count = new_tx_count;
- adapter->rx_rings[0]->count = new_rx_count;
- }
+ adapter->tx_desc_count = new_tx_count;
+ adapter->rx_desc_count = new_rx_count;
if (netif_running(netdev))
i40evf_reinit_locked(adapter);
+
return 0;
}
@@ -290,14 +289,13 @@ static int i40evf_get_coalesce(struct net_device *netdev,
ec->rx_max_coalesced_frames = vsi->work_limit;
if (ITR_IS_DYNAMIC(vsi->rx_itr_setting))
- ec->rx_coalesce_usecs = 1;
- else
- ec->rx_coalesce_usecs = vsi->rx_itr_setting;
+ ec->use_adaptive_rx_coalesce = 1;
if (ITR_IS_DYNAMIC(vsi->tx_itr_setting))
- ec->tx_coalesce_usecs = 1;
- else
- ec->tx_coalesce_usecs = vsi->tx_itr_setting;
+ ec->use_adaptive_tx_coalesce = 1;
+
+ ec->rx_coalesce_usecs = vsi->rx_itr_setting & ~I40E_ITR_DYNAMIC;
+ ec->tx_coalesce_usecs = vsi->tx_itr_setting & ~I40E_ITR_DYNAMIC;
return 0;
}
@@ -318,54 +316,361 @@ static int i40evf_set_coalesce(struct net_device *netdev,
struct i40e_q_vector *q_vector;
int i;
- if (ec->tx_max_coalesced_frames || ec->rx_max_coalesced_frames)
- vsi->work_limit = ec->tx_max_coalesced_frames;
+ if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
+ vsi->work_limit = ec->tx_max_coalesced_frames_irq;
+
+	if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+	    (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+		vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+	else if (ec->use_adaptive_rx_coalesce)
+		vsi->rx_itr_setting = (I40E_ITR_DYNAMIC |
+				       ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+	else
+		return -EINVAL;
+
+ if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
+ (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+ vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ else if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
+		vsi->tx_itr_setting = (I40E_ITR_DYNAMIC |
+				       ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ else
+ return -EINVAL;
+
+ if (ec->use_adaptive_rx_coalesce)
+ vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->rx_itr_setting &= ~I40E_ITR_DYNAMIC;
+
+ if (ec->use_adaptive_tx_coalesce)
+ vsi->tx_itr_setting |= I40E_ITR_DYNAMIC;
+ else
+ vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
- switch (ec->rx_coalesce_usecs) {
- case 0:
- vsi->rx_itr_setting = 0;
+ for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
+ q_vector = adapter->q_vector[i];
+ q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
+ q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
+ wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
+ i40e_flush(hw);
+ }
+
+ return 0;
+}
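
The bounds checks above follow from the 2 usec ITR register resolution declared in i40e_txrx.h; a worked range, using the values shown earlier in this patch:

	/* Accepted ethtool values are the register bounds shifted left by
	 * one: [I40E_MIN_ITR << 1, I40E_MAX_ITR << 1]
	 * == [0x0004 << 1, 0x0FF0 << 1] == [8, 8160] microseconds.
	 */
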
+
+/**
+ * i40e_get_rss_hash_opts - Get RSS hash Input Set for each flow type
+ * @adapter: board private structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the flow is supported, else Invalid Input.
+ **/
+static int i40evf_get_rss_hash_opts(struct i40evf_adapter *adapter,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40e_hw *hw = &adapter->hw;
+ u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+ /* We always hash on IP src and dest addresses */
+ cmd->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (cmd->flow_type) {
+ case TCP_V4_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
- case 1:
- vsi->rx_itr_setting = (I40E_ITR_DYNAMIC
- | ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ case UDP_V4_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
- default:
- if ((ec->rx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->rx_coalesce_usecs > (I40E_MAX_ITR << 1)))
- return -EINVAL;
- vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+
+ case SCTP_V4_FLOW:
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case IPV4_FLOW:
+ break;
+
+ case TCP_V6_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
break;
+ case UDP_V6_FLOW:
+ if (hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP))
+ cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ break;
+
+ case SCTP_V6_FLOW:
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ cmd->data = 0;
+ return -EINVAL;
}
- switch (ec->tx_coalesce_usecs) {
- case 0:
- vsi->tx_itr_setting = 0;
+ return 0;
+}
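
The hash-enable word is a 64-bit mask stitched from two 32-bit VFQF_HENA registers and indexed directly by packet-classifier type; a small sketch of the bit math (names as used in the hunks above):

	/* Example: NONF_IPV4_TCP is pctype 33, i.e. bit (33 - 32) == 1 of
	 * I40E_VFQF_HENA(1) after the two halves are combined.
	 */
	u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
		   ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
	bool tcp4_hashed = hena & ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
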
+
+/**
+ * i40evf_get_rxnfc - command to get RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_get_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = adapter->vsi_res->num_queue_pairs;
+ ret = 0;
break;
- case 1:
- vsi->tx_itr_setting = (I40E_ITR_DYNAMIC
- | ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
+ case ETHTOOL_GRXFH:
+ ret = i40evf_get_rss_hash_opts(adapter, cmd);
break;
default:
- if ((ec->tx_coalesce_usecs < (I40E_MIN_ITR << 1)) ||
- (ec->tx_coalesce_usecs > (I40E_MAX_ITR << 1)))
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_set_rss_hash_opt - Enable/Disable flow types for RSS hash
+ * @adapter: board private structure
+ * @nfc: ethtool rxnfc command
+ *
+ * Returns Success if the flow input set is supported.
+ **/
+static int i40evf_set_rss_hash_opt(struct i40evf_adapter *adapter,
+ struct ethtool_rxnfc *nfc)
+{
+ struct i40e_hw *hw = &adapter->hw;
+
+ u64 hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
+ ((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
+
+ /* RSS does not support anything other than hashing
+ * to queues on src and dst IPs and ports
+ */
+ if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
+ RXH_L4_B_0_1 | RXH_L4_B_2_3))
+ return -EINVAL;
+
+ /* We need at least the IP SRC and DEST fields for hashing */
+ if (!(nfc->data & RXH_IP_SRC) ||
+ !(nfc->data & RXH_IP_DST))
+ return -EINVAL;
+
+ switch (nfc->flow_type) {
+ case TCP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ break;
+ default:
return -EINVAL;
- vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+ }
+ break;
+ case TCP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case UDP_V4_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4));
+ break;
+ default:
+ return -EINVAL;
+ }
break;
+ case UDP_V6_FLOW:
+ switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
+ case 0:
+ hena &= ~(((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
+ hena |= (((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6));
+ break;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case AH_ESP_V4_FLOW:
+ case AH_V4_FLOW:
+ case ESP_V4_FLOW:
+ case SCTP_V4_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ break;
+ case AH_ESP_V6_FLOW:
+ case AH_V6_FLOW:
+ case ESP_V6_FLOW:
+ case SCTP_V6_FLOW:
+ if ((nfc->data & RXH_L4_B_0_1) ||
+ (nfc->data & RXH_L4_B_2_3))
+ return -EINVAL;
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ break;
+ case IPV4_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4);
+ break;
+ case IPV6_FLOW:
+ hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
+ ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ break;
+ default:
+ return -EINVAL;
}
- for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
- q_vector = adapter->q_vector[i];
- q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
- q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
- wr32(hw, I40E_VFINT_ITRN1(1, i), q_vector->tx.itr);
- i40e_flush(hw);
+ wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
+ wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
+ i40e_flush(hw);
+
+ return 0;
+}
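
From userspace this handler is reached by, for example, "ethtool -N <if> rx-flow-hash udp4 sdfn", which arrives as ETHTOOL_SRXFH with data = RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3. The 64-bit write-back at the end splits across the two 32-bit HENA registers; a sketch with a hypothetical helper name, using the same accessors the patch uses:

/* Hypothetical wrapper around the wr32() pattern used above. */
static void sketch_write_hena(struct i40e_hw *hw, u64 hena)
{
	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);		/* low 32 bits */
	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));	/* high 32 bits */
	i40e_flush(hw);					/* post the writes */
}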
+
+/**
+ * i40evf_set_rxnfc - command to set RX flow classification rules
+ * @netdev: network interface device structure
+ * @cmd: ethtool rxnfc command
+ *
+ * Returns Success if the command is supported.
+ **/
+static int i40evf_set_rxnfc(struct net_device *netdev,
+ struct ethtool_rxnfc *cmd)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ int ret = -EOPNOTSUPP;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_SRXFH:
+ ret = i40evf_set_rss_hash_opt(adapter, cmd);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+/**
+ * i40evf_get_channels: get the number of channels supported by the device
+ * @netdev: network interface device structure
+ * @ch: channel information structure
+ *
+ * For the purposes of our device, we only use combined channels, i.e. a tx/rx
+ * queue pair. Report one extra channel to match our "other" MSI-X vector.
+ **/
+static void i40evf_get_channels(struct net_device *netdev,
+ struct ethtool_channels *ch)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+
+ /* Report maximum channels */
+ ch->max_combined = adapter->vsi_res->num_queue_pairs;
+
+ ch->max_other = NONQ_VECS;
+ ch->other_count = NONQ_VECS;
+
+ ch->combined_count = adapter->vsi_res->num_queue_pairs;
+}
+
+/**
+ * i40evf_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * @netdev: network interface device structure
+ *
+ * Returns the table size.
+ **/
+static u32 i40evf_get_rxfh_indir_size(struct net_device *netdev)
+{
+ return (I40E_VFQF_HLUT_MAX_INDEX + 1) * 4;
+}
+
+/**
+ * i40evf_get_rxfh - get the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ *
+ * Reads the indirection table directly from the hardware. Always returns 0.
+ **/
+static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 hlut_val;
+ int i, j;
+
+ for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
+ hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
+ indir[j++] = hlut_val & 0xff;
+ indir[j++] = (hlut_val >> 8) & 0xff;
+ indir[j++] = (hlut_val >> 16) & 0xff;
+ indir[j++] = (hlut_val >> 24) & 0xff;
+ }
+ return 0;
+}
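
The loop above unpacks four one-byte queue indices from each 32-bit HLUT register, least-significant byte first; i40evf_set_rxfh below performs the inverse packing. The layout, as a sketch:

/* Hypothetical helper: extract entry 'slot' (0..3) of an HLUT word.
 *   bits  7:0  -> indir[4*i + 0]     bits 23:16 -> indir[4*i + 2]
 *   bits 15:8  -> indir[4*i + 1]     bits 31:24 -> indir[4*i + 3]
 */
static inline u8 sketch_hlut_entry(u32 reg, unsigned int slot)
{
	return (reg >> (8 * slot)) & 0xff;
}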
+
+/**
+ * i40evf_set_rxfh - set the rx flow hash indirection table
+ * @netdev: network interface device structure
+ * @indir: indirection table
+ * @key: hash key (will be %NULL until get_rxfh_key_size is implemented)
+ *
+ * Returns -EINVAL if the table specifies an invalid queue id, otherwise
+ * returns 0 after programming the table.
+ **/
+static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key)
+{
+ struct i40evf_adapter *adapter = netdev_priv(netdev);
+ struct i40e_hw *hw = &adapter->hw;
+ u32 hlut_val;
+ int i, j;
+
+ for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+ hlut_val = indir[j++];
+ hlut_val |= indir[j++] << 8;
+ hlut_val |= indir[j++] << 16;
+ hlut_val |= indir[j++] << 24;
+ wr32(hw, I40E_VFQF_HLUT(i), hlut_val);
}
return 0;
}
-static struct ethtool_ops i40evf_ethtool_ops = {
+static const struct ethtool_ops i40evf_ethtool_ops = {
.get_settings = i40evf_get_settings,
.get_drvinfo = i40evf_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -378,6 +683,12 @@ static struct ethtool_ops i40evf_ethtool_ops = {
.set_msglevel = i40evf_set_msglevel,
.get_coalesce = i40evf_get_coalesce,
.set_coalesce = i40evf_set_coalesce,
+ .get_rxnfc = i40evf_get_rxnfc,
+ .set_rxnfc = i40evf_set_rxnfc,
+ .get_rxfh_indir_size = i40evf_get_rxfh_indir_size,
+ .get_rxfh = i40evf_get_rxfh,
+ .set_rxfh = i40evf_set_rxfh,
+ .get_channels = i40evf_get_channels,
};
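
Constifying the ops table is safe because the core only ever reads through netdev->ethtool_ops, and the const lets the structure live in read-only data. The same pattern in miniature, using a stock helper:

/* Illustrative only: a read-only ops table. */
static const struct ethtool_ops sketch_ethtool_ops = {
	.get_link = ethtool_op_get_link,
};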
/**
@@ -389,5 +700,5 @@ static struct ethtool_ops i40evf_ethtool_ops = {
**/
void i40evf_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &i40evf_ethtool_ops);
+ netdev->ethtool_ops = &i40evf_ethtool_ops;
}
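
SET_ETHTOOL_OPS was a thin wrapper being removed tree-wide around this series; it expanded to roughly the assignment now written out directly:

/* Approximate former definition, for context: */
#define SET_ETHTOOL_OPS(netdev, ops) ((netdev)->ethtool_ops = (ops))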
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index 2797548..7fc5f3b 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -25,13 +28,15 @@
#include "i40e_prototype.h"
static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter);
static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter);
+static void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter);
static int i40evf_close(struct net_device *netdev);
char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710 X710 Virtual Function Network Driver";
-#define DRV_VERSION "0.9.16"
+#define DRV_VERSION "0.9.34"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -167,7 +172,6 @@ static void i40evf_tx_timeout(struct net_device *netdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
- dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
adapter->flags |= I40EVF_FLAG_RESET_NEEDED;
schedule_work(&adapter->reset_task);
@@ -657,12 +661,9 @@ i40evf_vlan_filter *i40evf_add_vlan(struct i40evf_adapter *adapter, u16 vlan)
f = i40evf_find_vlan(adapter, vlan);
if (NULL == f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
- if (NULL == f) {
- dev_info(&adapter->pdev->dev,
- "%s: no memory for new VLAN filter\n",
- __func__);
+ if (NULL == f)
return NULL;
- }
+
f->vlan = vlan;
INIT_LIST_HEAD(&f->list);
@@ -688,7 +689,6 @@ static void i40evf_del_vlan(struct i40evf_adapter *adapter, u16 vlan)
f->remove = true;
adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
}
- return;
}
/**
@@ -767,14 +767,12 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
if (NULL == f) {
f = kzalloc(sizeof(*f), GFP_ATOMIC);
if (NULL == f) {
- dev_info(&adapter->pdev->dev,
- "%s: no memory for new filter\n", __func__);
clear_bit(__I40EVF_IN_CRITICAL_TASK,
&adapter->crit_section);
return NULL;
}
- memcpy(f->macaddr, macaddr, ETH_ALEN);
+ ether_addr_copy(f->macaddr, macaddr);
list_add(&f->list, &adapter->mac_filter_list);
f->add = true;
@@ -807,9 +805,8 @@ static int i40evf_set_mac(struct net_device *netdev, void *p)
f = i40evf_add_filter(adapter, addr->sa_data);
if (f) {
- memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- memcpy(netdev->dev_addr, adapter->hw.mac.addr,
- netdev->addr_len);
+ ether_addr_copy(hw->mac.addr, addr->sa_data);
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
}
return (f == NULL) ? -ENOMEM : 0;
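
ether_addr_copy() requires both buffers to be 16-bit aligned, which the MAC fields touched here are; under that assumption it compiles to a few halfword moves instead of a generic memcpy(). A sketch of the common case:

/* Sketch of ether_addr_copy() on builds without efficient unaligned
 * access; both dst and src are assumed u16-aligned.
 */
static inline void sketch_ether_addr_copy(u8 *dst, const u8 *src)
{
	u16 *d = (u16 *)dst;
	const u16 *s = (const u16 *)src;

	d[0] = s[0];
	d[1] = s[1];
	d[2] = s[2];	/* 6 bytes as 3 halfwords */
}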
@@ -841,7 +838,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
bool found = false;
- if (f->macaddr[0] & 0x01) {
+ if (is_multicast_ether_addr(f->macaddr)) {
netdev_for_each_mc_addr(mca, netdev) {
if (ether_addr_equal(mca->addr, f->macaddr)) {
found = true;
@@ -970,6 +967,9 @@ void i40evf_down(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct i40evf_mac_filter *f;
+ if (adapter->state == __I40EVF_DOWN)
+ return;
+
/* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
@@ -1027,30 +1027,21 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err) /* Success in acquiring all requested vectors. */
- break;
- else if (err < 0)
- vectors = 0; /* Nasty failure, quit now */
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
-
- if (vectors < vector_threshold) {
- dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts.\n");
+ err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
+ if (err < 0) {
+ dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- err = -EIO;
- } else {
- /* Adjust for only the vectors we'll use, which is minimum
- * of max_msix_q_vectors + NONQ_VECS, or the number of
- * vectors we were allocated.
- */
- adapter->num_msix_vectors = vectors;
+ return err;
}
- return err;
+
+ /* Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NONQ_VECS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = err;
+ return 0;
}
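
pci_enable_msix_range() subsumes the old retry loop: it returns the number of vectors actually granted, guaranteed to lie within [minvec, maxvec], or a negative errno; it never reports a partial count below the minimum. The resulting call shape, as a sketch with hypothetical names:

static int sketch_acquire_vectors(struct pci_dev *pdev,
				  struct msix_entry *entries,
				  int min_needed, int max_wanted)
{
	int vecs = pci_enable_msix_range(pdev, entries,
					 min_needed, max_wanted);

	if (vecs < 0)
		return vecs;	/* no silent under-allocation */
	return vecs;		/* caller stores the granted count */
}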
/**
@@ -1096,14 +1087,14 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->queue_index = i;
tx_ring->netdev = adapter->netdev;
tx_ring->dev = &adapter->pdev->dev;
- tx_ring->count = I40EVF_DEFAULT_TXD;
+ tx_ring->count = adapter->tx_desc_count;
adapter->tx_rings[i] = tx_ring;
rx_ring = &tx_ring[1];
rx_ring->queue_index = i;
rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev;
- rx_ring->count = I40EVF_DEFAULT_RXD;
+ rx_ring->count = adapter->rx_desc_count;
adapter->rx_rings[i] = rx_ring;
}
@@ -1141,9 +1132,6 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
v_budget = min_t(int, pairs, (int)(num_online_cpus() * 2)) + NONQ_VECS;
v_budget = min_t(int, v_budget, (int)adapter->vf_res->max_vectors);
- /* A failure in MSI-X entry allocation isn't fatal, but it does
- * mean we disable MSI-X capabilities of the adapter.
- */
adapter->msix_entries = kcalloc(v_budget,
sizeof(struct msix_entry), GFP_KERNEL);
if (!adapter->msix_entries) {
@@ -1183,7 +1171,7 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi,
- i40evf_napi_poll, 64);
+ i40evf_napi_poll, NAPI_POLL_WEIGHT);
adapter->q_vector[q_idx] = q_vector;
}
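
NAPI_POLL_WEIGHT is the stock per-poll budget; spelling the symbol instead of the bare literal keeps the driver in step if the core default ever moves. Its definition at the time of this series:

/* From <linux/netdevice.h> when this series landed: */
#define NAPI_POLL_WEIGHT 64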
@@ -1236,8 +1224,6 @@ void i40evf_reset_interrupt_capability(struct i40evf_adapter *adapter)
pci_disable_msix(adapter->pdev);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
-
- return;
}
/**
@@ -1309,7 +1295,6 @@ static void i40evf_watchdog_task(struct work_struct *work)
goto restart_watchdog;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
- dev_info(&adapter->pdev->dev, "Checking for redemption\n");
if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
/* A chance for redemption! */
dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
@@ -1340,8 +1325,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
(rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
adapter->state = __I40EVF_RESETTING;
adapter->flags |= I40EVF_FLAG_RESET_PENDING;
- dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
- dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+ dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
schedule_work(&adapter->reset_task);
adapter->aq_pending = 0;
adapter->aq_required = 0;
@@ -1413,7 +1397,7 @@ restart_watchdog:
}
/**
- * i40evf_configure_rss - increment to next available tx queue
+ * next_queue - increment to next available tx queue
* @adapter: board private structure
* @j: queue counter
*
@@ -1504,15 +1488,12 @@ static void i40evf_reset_task(struct work_struct *work)
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (rstat_val != I40E_VFR_VFACTIVE) {
- dev_info(&adapter->pdev->dev, "Reset now occurring\n");
+ if (rstat_val != I40E_VFR_VFACTIVE)
break;
- } else {
+ else
msleep(I40EVF_RESET_WAIT_MS);
- }
}
if (i == I40EVF_RESET_WAIT_COUNT) {
- dev_err(&adapter->pdev->dev, "Reset was not detected\n");
adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
goto continue_reset; /* act like the reset happened */
}
@@ -1521,22 +1502,24 @@ static void i40evf_reset_task(struct work_struct *work)
for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (rstat_val == I40E_VFR_VFACTIVE) {
- dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
+ if (rstat_val == I40E_VFR_VFACTIVE)
break;
- } else {
+ else
msleep(I40EVF_RESET_WAIT_MS);
- }
}
if (i == I40EVF_RESET_WAIT_COUNT) {
/* reset never finished */
- dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n",
+ dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
rstat_val);
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
- if (netif_running(adapter->netdev))
- i40evf_close(adapter->netdev);
-
+ if (netif_running(adapter->netdev)) {
+ set_bit(__I40E_DOWN, &adapter->vsi.state);
+ i40evf_down(adapter);
+ i40evf_free_traffic_irqs(adapter);
+ i40evf_free_all_tx_resources(adapter);
+ i40evf_free_all_rx_resources(adapter);
+ }
i40evf_free_misc_irq(adapter);
i40evf_reset_interrupt_capability(adapter);
i40evf_free_queues(adapter);
@@ -1591,7 +1574,7 @@ continue_reset:
}
return;
reset_err:
- dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
i40evf_close(adapter->netdev);
}
@@ -1607,6 +1590,7 @@ static void i40evf_adminq_task(struct work_struct *work)
struct i40e_arq_event_info event;
struct i40e_virtchnl_msg *v_msg;
i40e_status ret;
+ u32 val, oldval;
u16 pending;
if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
@@ -1614,11 +1598,9 @@ static void i40evf_adminq_task(struct work_struct *work)
event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
- if (!event.msg_buf) {
- dev_info(&adapter->pdev->dev, "%s: no memory for ARQ clean\n",
- __func__);
+ if (!event.msg_buf)
return;
- }
+
v_msg = (struct i40e_virtchnl_msg *)&event.desc;
do {
ret = i40evf_clean_arq_element(hw, &event, &pending);
@@ -1636,6 +1618,41 @@ static void i40evf_adminq_task(struct work_struct *work)
}
} while (pending);
+ /* check for error indications */
+ val = rd32(hw, hw->aq.arq.len);
+ oldval = val;
+ if (val & I40E_VF_ARQLEN_ARQVFE_MASK) {
+ dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQVFE_MASK;
+ }
+ if (val & I40E_VF_ARQLEN_ARQOVFL_MASK) {
+ dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQOVFL_MASK;
+ }
+ if (val & I40E_VF_ARQLEN_ARQCRIT_MASK) {
+ dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
+ val &= ~I40E_VF_ARQLEN_ARQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(hw, hw->aq.arq.len, val);
+
+ val = rd32(hw, hw->aq.asq.len);
+ oldval = val;
+ if (val & I40E_VF_ATQLEN_ATQVFE_MASK) {
+ dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQVFE_MASK;
+ }
+ if (val & I40E_VF_ATQLEN_ATQOVFL_MASK) {
+ dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQOVFL_MASK;
+ }
+ if (val & I40E_VF_ATQLEN_ATQCRIT_MASK) {
+ dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
+ val &= ~I40E_VF_ATQLEN_ATQCRIT_MASK;
+ }
+ if (oldval != val)
+ wr32(hw, hw->aq.asq.len, val);
+
/* re-enable Admin queue interrupt cause */
i40evf_misc_irq_enable(adapter);
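
The new block reads each admin-queue length register once, logs and clears any error bits, and writes back only when something changed. The same read-modify-write, factored into a hypothetical helper using the driver's rd32()/wr32() accessors:

/* Hypothetical refactor of the pattern above, one mask at a time. */
static void sketch_clear_aq_errors(struct i40e_hw *hw, u32 reg,
				   u32 err_mask, const char *name)
{
	u32 val = rd32(hw, reg);

	if (val & err_mask) {
		pr_info("%s error detected\n", name);
		wr32(hw, reg, val & ~err_mask);	/* write back only on change */
	}
}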
@@ -1673,6 +1690,7 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->tx_rings[i]->count = adapter->tx_desc_count;
err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
if (!err)
continue;
@@ -1700,6 +1718,7 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
int i, err = 0;
for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->rx_rings[i]->count = adapter->rx_desc_count;
err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
if (!err)
continue;
@@ -1804,12 +1823,11 @@ static int i40evf_close(struct net_device *netdev)
if (adapter->state <= __I40EVF_DOWN)
return 0;
- /* signal that we are down to the interrupt handler */
- adapter->state = __I40EVF_DOWN;
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_down(adapter);
+ adapter->state = __I40EVF_DOWN;
i40evf_free_traffic_irqs(adapter);
i40evf_free_all_tx_resources(adapter);
@@ -1848,8 +1866,6 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
WARN_ON(in_interrupt());
- adapter->state = __I40EVF_RESETTING;
-
i40evf_down(adapter);
/* allocate transmit descriptors */
@@ -1872,7 +1888,7 @@ void i40evf_reinit_locked(struct i40evf_adapter *adapter)
return;
err_reinit:
- dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit.\n");
+ dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
i40evf_close(netdev);
}
@@ -1967,7 +1983,7 @@ static void i40evf_init_task(struct work_struct *work)
}
err = i40evf_check_reset_complete(hw);
if (err) {
- dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+ dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
err);
goto err;
}
@@ -1993,14 +2009,14 @@ static void i40evf_init_task(struct work_struct *work)
break;
case __I40EVF_INIT_VERSION_CHECK:
if (!i40evf_asq_done(hw)) {
- dev_err(&pdev->dev, "Admin queue command never completed.\n");
+ dev_err(&pdev->dev, "Admin queue command never completed\n");
goto err;
}
/* aq msg sent, awaiting reply */
err = i40evf_verify_api_ver(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
+ dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
err);
goto err;
}
@@ -2074,12 +2090,12 @@ static void i40evf_init_task(struct work_struct *work)
netdev->hw_features &= ~NETIF_F_RXCSUM;
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
- dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
+ dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
adapter->hw.mac.addr);
random_ether_addr(adapter->hw.mac.addr);
}
- memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
- memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
INIT_LIST_HEAD(&adapter->mac_filter_list);
INIT_LIST_HEAD(&adapter->vlan_filter_list);
@@ -2087,7 +2103,7 @@ static void i40evf_init_task(struct work_struct *work)
if (NULL == f)
goto err_sw_init;
- memcpy(f->macaddr, adapter->hw.mac.addr, ETH_ALEN);
+ ether_addr_copy(f->macaddr, adapter->hw.mac.addr);
f->add = true;
adapter->aq_required |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
@@ -2098,6 +2114,8 @@ static void i40evf_init_task(struct work_struct *work)
adapter->watchdog_timer.data = (unsigned long)adapter;
mod_timer(&adapter->watchdog_timer, jiffies + 1);
+ adapter->tx_desc_count = I40EVF_DEFAULT_TXD;
+ adapter->rx_desc_count = I40EVF_DEFAULT_RXD;
err = i40evf_init_interrupt_scheme(adapter);
if (err)
goto err_sw_init;
@@ -2114,8 +2132,10 @@ static void i40evf_init_task(struct work_struct *work)
adapter->vsi.back = adapter;
adapter->vsi.base_vector = 1;
adapter->vsi.work_limit = I40E_DEFAULT_IRQ_WORK;
- adapter->vsi.rx_itr_setting = I40E_ITR_DYNAMIC;
- adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
+ adapter->vsi.rx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_RX_DEF));
+ adapter->vsi.tx_itr_setting = (I40E_ITR_DYNAMIC |
+ ITR_REG_TO_USEC(I40E_ITR_TX_DEF));
adapter->vsi.netdev = adapter->netdev;
if (!adapter->netdev_registered) {
@@ -2128,7 +2148,7 @@ static void i40evf_init_task(struct work_struct *work)
netif_tx_stop_all_queues(netdev);
- dev_info(&pdev->dev, "MAC address: %pMAC\n", adapter->hw.mac.addr);
+ dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
if (netdev->features & NETIF_F_GRO)
dev_info(&pdev->dev, "GRO is enabled\n");
@@ -2152,12 +2172,11 @@ err_alloc:
err:
/* Things went into the weeds, so try again later */
if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
- dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
+ dev_err(&pdev->dev, "Failed to communicate with PF; giving up\n");
adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
return; /* do not reschedule */
}
schedule_delayed_work(&adapter->init_task, HZ * 3);
- return;
}
/**
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index e294f01..2dc0bac 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -12,6 +12,9 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
+ * You should have received a copy of the GNU General Public License along
+ * with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
@@ -216,11 +219,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vsi_queue_config_info) +
(sizeof(struct i40e_virtchnl_queue_pair_info) * pairs);
vqci = kzalloc(len, GFP_ATOMIC);
- if (!vqci) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!vqci)
return;
- }
+
vqci->vsi_id = adapter->vsi_res->vsi_id;
vqci->num_queue_pairs = pairs;
vqpi = vqci->qpair;
@@ -232,6 +233,9 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
vqpi->txq.queue_id = i;
vqpi->txq.ring_len = adapter->tx_rings[i]->count;
vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
+ vqpi->txq.headwb_enabled = 1;
+ vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
+ (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));
vqpi->rxq.vsi_id = vqci->vsi_id;
vqpi->rxq.queue_id = i;
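
Head writeback places the hardware's ring-head snapshot immediately past the last descriptor, so the Tx ring's DMA allocation must reserve that extra slot. The address math above, isolated:

/* Sketch: where the head-writeback slot lands relative to the ring. */
static inline u64 sketch_headwb_addr(u64 ring_dma, u32 ring_len)
{
	return ring_dma + (u64)ring_len * sizeof(struct i40e_tx_desc);
}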
@@ -329,11 +333,8 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
(adapter->num_msix_vectors *
sizeof(struct i40e_virtchnl_vector_map));
vimi = kzalloc(len, GFP_ATOMIC);
- if (!vimi) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!vimi)
return;
- }
vimi->num_vectors = adapter->num_msix_vectors;
/* Queue vectors first */
@@ -390,7 +391,7 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
__func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -399,16 +400,14 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
}
veal = kzalloc(len, GFP_ATOMIC);
- if (!veal) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!veal)
return;
- }
+
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
list_for_each_entry(f, &adapter->mac_filter_list, list) {
if (f->add) {
- memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+ ether_addr_copy(veal->list[i].addr, f->macaddr);
i++;
f->add = false;
}
@@ -454,7 +453,7 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_ether_addr_list) +
(count * sizeof(struct i40e_virtchnl_ether_addr));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Too many MAC address changes in one request\n",
__func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_ether_addr_list)) /
@@ -462,16 +461,14 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
len = I40EVF_MAX_AQ_BUF_SIZE;
}
veal = kzalloc(len, GFP_ATOMIC);
- if (!veal) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!veal)
return;
- }
+
veal->vsi_id = adapter->vsi_res->vsi_id;
veal->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
if (f->remove) {
- memcpy(veal->list[i].addr, f->macaddr, ETH_ALEN);
+ ether_addr_copy(veal->list[i].addr, f->macaddr);
i++;
list_del(&f->list);
kfree(f);
@@ -518,7 +515,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
__func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -526,11 +523,9 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
len = I40EVF_MAX_AQ_BUF_SIZE;
}
vvfl = kzalloc(len, GFP_ATOMIC);
- if (!vvfl) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!vvfl)
return;
- }
+
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry(f, &adapter->vlan_filter_list, list) {
@@ -580,7 +575,7 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
len = sizeof(struct i40e_virtchnl_vlan_filter_list) +
(count * sizeof(u16));
if (len > I40EVF_MAX_AQ_BUF_SIZE) {
- dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Too many VLAN changes in one request\n",
__func__);
count = (I40EVF_MAX_AQ_BUF_SIZE -
sizeof(struct i40e_virtchnl_vlan_filter_list)) /
@@ -588,11 +583,9 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
len = I40EVF_MAX_AQ_BUF_SIZE;
}
vvfl = kzalloc(len, GFP_ATOMIC);
- if (!vvfl) {
- dev_err(&adapter->pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!vvfl)
return;
- }
+
vvfl->vsi_id = adapter->vsi_res->vsi_id;
vvfl->num_elements = count;
list_for_each_entry_safe(f, ftmp, &adapter->vlan_filter_list, list) {
@@ -721,7 +714,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
return;
}
if (v_opcode != adapter->current_op) {
- dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d.\n",
+ dev_err(&adapter->pdev->dev, "%s: Pending op is %d, received %d\n",
__func__, adapter->current_op, v_opcode);
/* We're probably completely screwed at this point, but clear
* the current op and try to carry on....
@@ -730,7 +723,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
return;
}
if (v_retval) {
- dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d!\n",
+ dev_err(&adapter->pdev->dev, "%s: PF returned error %d to our request %d\n",
__func__, v_retval, v_opcode);
}
switch (v_opcode) {
@@ -745,9 +738,8 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
stats->tx_broadcast;
adapter->net_stats.rx_bytes = stats->rx_bytes;
adapter->net_stats.tx_bytes = stats->tx_bytes;
- adapter->net_stats.rx_errors = stats->rx_errors;
adapter->net_stats.tx_errors = stats->tx_errors;
- adapter->net_stats.rx_dropped = stats->rx_missed;
+ adapter->net_stats.rx_dropped = stats->rx_discards;
adapter->net_stats.tx_dropped = stats->tx_discards;
adapter->current_stats = *stats;
}
@@ -781,7 +773,7 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
adapter->aq_pending &= ~(I40EVF_FLAG_AQ_MAP_VECTORS);
break;
default:
- dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF.\n",
+ dev_warn(&adapter->pdev->dev, "%s: Received unexpected message %d from PF\n",
__func__, v_opcode);
break;
} /* switch v_opcode */
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index fa36fe12..a2db388 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* e1000_82575
* e1000_82576
@@ -73,9 +70,8 @@ static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw);
static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
-static const u16 e1000_82580_rxpbs_table[] =
- { 36, 72, 144, 1, 2, 4, 8, 16,
- 35, 70, 140 };
+static const u16 e1000_82580_rxpbs_table[] = {
+ 36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
/**
* igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
@@ -159,7 +155,7 @@ static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
ret_val = igb_check_for_link_82575(hw);
}
- return E1000_SUCCESS;
+ return 0;
}
/**
@@ -526,7 +522,7 @@ out:
static s32 igb_get_invariants_82575(struct e1000_hw *hw)
{
struct e1000_mac_info *mac = &hw->mac;
- struct e1000_dev_spec_82575 * dev_spec = &hw->dev_spec._82575;
+ struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
s32 ret_val;
u32 ctrl_ext = 0;
u32 link_mode = 0;
@@ -1008,7 +1004,6 @@ out:
static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
u16 data;
data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -1032,7 +1027,7 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
data &= ~E1000_82580_PM_SPD;
}
wr32(E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
+ return 0;
}
/**
@@ -1052,7 +1047,6 @@ static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
{
struct e1000_phy_info *phy = &hw->phy;
- s32 ret_val = 0;
u16 data;
data = rd32(E1000_82580_PHY_POWER_MGMT);
@@ -1077,7 +1071,7 @@ static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
}
wr32(E1000_82580_PHY_POWER_MGMT, data);
- return ret_val;
+ return 0;
}
/**
@@ -1180,8 +1174,8 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igb_get_hw_semaphore(hw) != 0);
- /* Empty */
+ while (igb_get_hw_semaphore(hw) != 0)
+ ; /* Empty */
swfw_sync = rd32(E1000_SW_FW_SYNC);
swfw_sync &= ~mask;
@@ -1203,7 +1197,6 @@ static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
{
s32 timeout = PHY_CFG_TIMEOUT;
- s32 ret_val = 0;
u32 mask = E1000_NVM_CFG_DONE_PORT_0;
if (hw->bus.func == 1)
@@ -1216,7 +1209,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
while (timeout) {
if (rd32(E1000_EEMNGCTL) & mask)
break;
- msleep(1);
+ usleep_range(1000, 2000);
timeout--;
}
if (!timeout)
@@ -1227,7 +1220,7 @@ static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
(hw->phy.type == e1000_phy_igp_3))
igb_phy_init_script_igp3(hw);
- return ret_val;
+ return 0;
}
/**
@@ -1269,7 +1262,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
if (hw->phy.media_type != e1000_media_type_copper) {
ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
- &duplex);
+ &duplex);
/* Use this flag to determine if link needs to be checked or
* not. If we have link clear the flag so that we do not
* continue to check for link.
@@ -1316,7 +1309,7 @@ void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
/* flush the write to verify completion */
wrfl();
- msleep(1);
+ usleep_range(1000, 2000);
}
/**
@@ -1411,7 +1404,7 @@ void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
/* flush the write to verify completion */
wrfl();
- msleep(1);
+ usleep_range(1000, 2000);
}
}
@@ -1436,9 +1429,8 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
/* set the completion timeout for interface */
ret_val = igb_set_pcie_completion_timeout(hw);
- if (ret_val) {
+ if (ret_val)
hw_dbg("PCI-E Set completion timeout has failed.\n");
- }
hw_dbg("Masking off all interrupts\n");
wr32(E1000_IMC, 0xffffffff);
@@ -1447,7 +1439,7 @@ static s32 igb_reset_hw_82575(struct e1000_hw *hw)
wr32(E1000_TCTL, E1000_TCTL_PSP);
wrfl();
- msleep(10);
+ usleep_range(10000, 20000);
ctrl = rd32(E1000_CTRL);
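
The msleep() conversions in this file follow Documentation/timers/timers-howto.txt: msleep() rounds up to jiffies, so small values can oversleep badly (msleep(1) may take 10 to 20 ms at HZ=100), while usleep_range() is hrtimer-backed and the min/max slack lets the kernel coalesce wakeups.

/* Pattern used throughout this patch:
 *   msleep(1)  -> usleep_range(1000, 2000);
 *   msleep(10) -> usleep_range(10000, 20000);
 */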
@@ -1622,7 +1614,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
{
u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
bool pcs_autoneg;
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
u16 data;
if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
@@ -1676,7 +1668,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
hw->mac.type == e1000_82576) {
ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
if (ret_val) {
- printk(KERN_DEBUG "NVM Read Error\n\n");
+ hw_dbg("NVM Read Error\n");
return ret_val;
}
@@ -1689,7 +1681,7 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
* link either autoneg or be forced to 1000/Full
*/
ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
- E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+ E1000_CTRL_FD | E1000_CTRL_FRCDPX;
/* set speed of 1000/Full if speed/duplex is forced */
reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
@@ -1925,7 +1917,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
}
/* Poll all queues to verify they have shut down */
for (ms_wait = 0; ms_wait < 10; ms_wait++) {
- msleep(1);
+ usleep_range(1000, 2000);
rx_enabled = 0;
for (i = 0; i < 4; i++)
rx_enabled |= rd32(E1000_RXDCTL(i));
@@ -1953,7 +1945,7 @@ void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
wr32(E1000_RCTL, temp_rctl);
wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
wrfl();
- msleep(2);
+ usleep_range(2000, 3000);
/* Enable RX queues that were previously enabled and restore our
* previous state
@@ -2005,14 +1997,14 @@ static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
* 16ms to 55ms
*/
ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ &pcie_devctl2);
if (ret_val)
goto out;
pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
- &pcie_devctl2);
+ &pcie_devctl2);
out:
/* disable completion timeout resend */
gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
@@ -2241,7 +2233,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
wr32(E1000_TCTL, E1000_TCTL_PSP);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Determine whether or not a global dev reset is requested */
if (global_device_reset &&
@@ -2259,7 +2251,7 @@ static s32 igb_reset_hw_82580(struct e1000_hw *hw)
/* Add delay to insure DEV_RST has time to complete */
if (global_device_reset)
- msleep(5);
+ usleep_range(5000, 6000);
ret_val = igb_get_auto_rd_done(hw);
if (ret_val) {
@@ -2436,8 +2428,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
if (ret_val) {
- hw_dbg("NVM Read Error while updating checksum"
- " compatibility bit.\n");
+ hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
goto out;
}
@@ -2447,8 +2438,7 @@ static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
&nvm_data);
if (ret_val) {
- hw_dbg("NVM Write Error while updating checksum"
- " compatibility bit.\n");
+ hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
goto out;
}
}
@@ -2525,7 +2515,7 @@ out:
static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
u16 *data, bool read)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
if (ret_val)
@@ -2559,7 +2549,6 @@ s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
**/
s32 igb_set_eee_i350(struct e1000_hw *hw)
{
- s32 ret_val = 0;
u32 ipcnfg, eeer;
if ((hw->mac.type < e1000_i350) ||
@@ -2593,7 +2582,7 @@ s32 igb_set_eee_i350(struct e1000_hw *hw)
rd32(E1000_EEER);
out:
- return ret_val;
+ return 0;
}
/**
@@ -2720,7 +2709,6 @@ static const u8 e1000_emc_therm_limit[4] = {
**/
static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
u16 ets_offset;
u16 ets_cfg;
u16 ets_sensor;
@@ -2738,7 +2726,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
/* Return the internal sensor only if ETS is unsupported */
hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
- return status;
+ return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2762,7 +2750,7 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
E1000_I2C_THERMAL_SENSOR_ADDR,
&data->sensor[i].temp);
}
- return status;
+ return 0;
}
/**
@@ -2774,7 +2762,6 @@ static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
**/
static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
u16 ets_offset;
u16 ets_cfg;
u16 ets_sensor;
@@ -2800,7 +2787,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
/* Return the internal sensor only if ETS is unsupported */
hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
- return status;
+ return 0;
hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
@@ -2831,7 +2818,7 @@ static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
low_thresh_delta;
}
}
- return status;
+ return 0;
}
#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 09d78be..b407c55 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_82575_H_
#define _E1000_82575_H_
@@ -37,9 +34,9 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
u8 data);
#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
- (ID_LED_DEF1_DEF2 << 8) | \
- (ID_LED_DEF1_DEF2 << 4) | \
- (ID_LED_OFF1_ON2))
+ (ID_LED_DEF1_DEF2 << 8) | \
+ (ID_LED_DEF1_DEF2 << 4) | \
+ (ID_LED_OFF1_ON2))
#define E1000_RAR_ENTRIES_82575 16
#define E1000_RAR_ENTRIES_82576 24
@@ -67,16 +64,16 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX 0x01000000
#define E1000_EICR_TX_QUEUE ( \
- E1000_EICR_TX_QUEUE0 | \
- E1000_EICR_TX_QUEUE1 | \
- E1000_EICR_TX_QUEUE2 | \
- E1000_EICR_TX_QUEUE3)
+ E1000_EICR_TX_QUEUE0 | \
+ E1000_EICR_TX_QUEUE1 | \
+ E1000_EICR_TX_QUEUE2 | \
+ E1000_EICR_TX_QUEUE3)
#define E1000_EICR_RX_QUEUE ( \
- E1000_EICR_RX_QUEUE0 | \
- E1000_EICR_RX_QUEUE1 | \
- E1000_EICR_RX_QUEUE2 | \
- E1000_EICR_RX_QUEUE3)
+ E1000_EICR_RX_QUEUE0 | \
+ E1000_EICR_RX_QUEUE1 | \
+ E1000_EICR_RX_QUEUE2 | \
+ E1000_EICR_RX_QUEUE3)
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
@@ -92,8 +89,7 @@ union e1000_adv_rx_desc {
struct {
struct {
__le16 pkt_info; /* RSS type, Packet type */
- __le16 hdr_info; /* Split Header,
- * header buffer length */
+ __le16 hdr_info; /* Split Head, buf len */
} lo_dword;
union {
__le32 rss; /* RSS Hash */
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index b05bf92..2a8bb35 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_DEFINES_H_
#define _E1000_DEFINES_H_
@@ -101,11 +98,11 @@
/* Same mask, but for extended and packet split descriptors */
#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
- E1000_RXDEXT_STATERR_CE | \
- E1000_RXDEXT_STATERR_SE | \
- E1000_RXDEXT_STATERR_SEQ | \
- E1000_RXDEXT_STATERR_CXE | \
- E1000_RXDEXT_STATERR_RXE)
+ E1000_RXDEXT_STATERR_CE | \
+ E1000_RXDEXT_STATERR_SE | \
+ E1000_RXDEXT_STATERR_SEQ | \
+ E1000_RXDEXT_STATERR_CXE | \
+ E1000_RXDEXT_STATERR_RXE)
#define E1000_MRQC_RSS_FIELD_IPV4_TCP 0x00010000
#define E1000_MRQC_RSS_FIELD_IPV4 0x00020000
@@ -307,39 +304,34 @@
#define E1000_TCTL_RTLC 0x01000000 /* Re-transmit on late collision */
/* DMA Coalescing register fields */
-#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coalescing
- * Watchdog Timer */
-#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coalescing Receive
- * Threshold */
+#define E1000_DMACR_DMACWT_MASK 0x00003FFF /* DMA Coal Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK 0x00FF0000 /* DMA Coal Rx Threshold */
#define E1000_DMACR_DMACTHR_SHIFT 16
-#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe
- * transactions */
+#define E1000_DMACR_DMAC_LX_MASK 0x30000000 /* Lx when no PCIe trans */
#define E1000_DMACR_DMAC_LX_SHIFT 28
#define E1000_DMACR_DMAC_EN 0x80000000 /* Enable DMA Coalescing */
/* DMA Coalescing BMC-to-OS Watchdog Enable */
#define E1000_DMACR_DC_BMC2OSW_EN 0x00008000
-#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coalescing Transmit
- * Threshold */
+#define E1000_DMCTXTH_DMCTTHR_MASK 0x00000FFF /* DMA Coal Tx Threshold */
#define E1000_DMCTLX_TTLX_MASK 0x00000FFF /* Time to LX request */
-#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Receive Traffic Rate
- * Threshold */
-#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rcv packet rate in
- * current window */
+#define E1000_DMCRTRH_UTRESH_MASK 0x0007FFFF /* Rx Traffic Rate Thresh */
+#define E1000_DMCRTRH_LRPRCW 0x80000000 /* Rx pkt rate curr window */
-#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rcv Traffic
- * Current Cnt */
+#define E1000_DMCCNT_CCOUNT_MASK 0x01FFFFFF /* DMA Coal Rx Current Cnt */
-#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* Flow ctrl Rcv Threshold
- * High val */
+#define E1000_FCRTC_RTH_COAL_MASK 0x0003FFF0 /* FC Rx Thresh High val */
#define E1000_FCRTC_RTH_COAL_SHIFT 4
#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision */
/* Timestamp in Rx buffer */
#define E1000_RXPBS_CFG_TS_EN 0x80000000
+#define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */
+#define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */
+
/* SerDes Control */
#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
@@ -406,12 +398,12 @@
* o LSC = Link Status Change
*/
#define IMS_ENABLE_MASK ( \
- E1000_IMS_RXT0 | \
- E1000_IMS_TXDW | \
- E1000_IMS_RXDMT0 | \
- E1000_IMS_RXSEQ | \
- E1000_IMS_LSC | \
- E1000_IMS_DOUTSYNC)
+ E1000_IMS_RXT0 | \
+ E1000_IMS_TXDW | \
+ E1000_IMS_RXDMT0 | \
+ E1000_IMS_RXSEQ | \
+ E1000_IMS_LSC | \
+ E1000_IMS_DOUTSYNC)
/* Interrupt Mask Set */
#define E1000_IMS_TXDW E1000_ICR_TXDW /* Transmit desc written back */
@@ -467,7 +459,6 @@
#define E1000_RAH_POOL_1 0x00040000
/* Error Codes */
-#define E1000_SUCCESS 0
#define E1000_ERR_NVM 1
#define E1000_ERR_PHY 2
#define E1000_ERR_CONFIG 3
@@ -1011,8 +1002,7 @@
#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK 0x1F
/* DMA Coalescing register fields */
-#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power decision based
- on DMA coal */
+#define E1000_PCIEMISC_LX_DECISION 0x00000080 /* Lx power on DMA coal */
/* Tx Rate-Scheduler Config fields */
#define E1000_RTTBCNRC_RS_ENA 0x80000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 10741d1..89925e4 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,28 +1,24 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_HW_H_
#define _E1000_HW_H_
@@ -320,15 +316,15 @@ struct e1000_host_mng_command_info {
#include "e1000_mbx.h"
struct e1000_mac_operations {
- s32 (*check_for_link)(struct e1000_hw *);
- s32 (*reset_hw)(struct e1000_hw *);
- s32 (*init_hw)(struct e1000_hw *);
+ s32 (*check_for_link)(struct e1000_hw *);
+ s32 (*reset_hw)(struct e1000_hw *);
+ s32 (*init_hw)(struct e1000_hw *);
bool (*check_mng_mode)(struct e1000_hw *);
- s32 (*setup_physical_interface)(struct e1000_hw *);
+ s32 (*setup_physical_interface)(struct e1000_hw *);
void (*rar_set)(struct e1000_hw *, u8 *, u32);
- s32 (*read_mac_addr)(struct e1000_hw *);
- s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
- s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
+ s32 (*read_mac_addr)(struct e1000_hw *);
+ s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+ s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
void (*release_swfw_sync)(struct e1000_hw *, u16);
#ifdef CONFIG_IGB_HWMON
s32 (*get_thermal_sensor_data)(struct e1000_hw *);
@@ -338,31 +334,31 @@ struct e1000_mac_operations {
};
struct e1000_phy_operations {
- s32 (*acquire)(struct e1000_hw *);
- s32 (*check_polarity)(struct e1000_hw *);
- s32 (*check_reset_block)(struct e1000_hw *);
- s32 (*force_speed_duplex)(struct e1000_hw *);
- s32 (*get_cfg_done)(struct e1000_hw *hw);
- s32 (*get_cable_length)(struct e1000_hw *);
- s32 (*get_phy_info)(struct e1000_hw *);
- s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*check_polarity)(struct e1000_hw *);
+ s32 (*check_reset_block)(struct e1000_hw *);
+ s32 (*force_speed_duplex)(struct e1000_hw *);
+ s32 (*get_cfg_done)(struct e1000_hw *hw);
+ s32 (*get_cable_length)(struct e1000_hw *);
+ s32 (*get_phy_info)(struct e1000_hw *);
+ s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
void (*release)(struct e1000_hw *);
- s32 (*reset)(struct e1000_hw *);
- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
- s32 (*write_reg)(struct e1000_hw *, u32, u16);
+ s32 (*reset)(struct e1000_hw *);
+ s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+ s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+ s32 (*write_reg)(struct e1000_hw *, u32, u16);
s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
};
struct e1000_nvm_operations {
- s32 (*acquire)(struct e1000_hw *);
- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*acquire)(struct e1000_hw *);
+ s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
void (*release)(struct e1000_hw *);
- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
- s32 (*update)(struct e1000_hw *);
- s32 (*validate)(struct e1000_hw *);
- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+ s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+ s32 (*update)(struct e1000_hw *);
+ s32 (*validate)(struct e1000_hw *);
+ s32 (*valid_led_default)(struct e1000_hw *, u16 *);
};
#define E1000_MAX_SENSORS 3
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index f67f8a1..337161f 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* e1000_i210
* e1000_i211
@@ -100,7 +97,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
return -E1000_ERR_NVM;
}
- return E1000_SUCCESS;
+ return 0;
}
/**
@@ -142,7 +139,7 @@ s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
u32 swfw_sync;
u32 swmask = mask;
u32 fwmask = mask << 16;
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
while (i < timeout) {
@@ -187,7 +184,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
{
u32 swfw_sync;
- while (igb_get_hw_semaphore_i210(hw) != E1000_SUCCESS)
+ while (igb_get_hw_semaphore_i210(hw))
; /* Empty */
swfw_sync = rd32(E1000_SW_FW_SYNC);
@@ -210,7 +207,7 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
@@ -220,7 +217,7 @@ static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
status = igb_read_nvm_eerd(hw, offset, count,
data + i);
hw->nvm.ops.release(hw);
@@ -228,7 +225,7 @@ static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
status = E1000_ERR_SWFW_SYNC;
}
- if (status != E1000_SUCCESS)
+ if (status)
break;
}
@@ -253,7 +250,7 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
struct e1000_nvm_info *nvm = &hw->nvm;
u32 i, k, eewr = 0;
u32 attempts = 100000;
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
/* A check for invalid values: offset too large, too many words,
* too many words for the offset, and not enough words.
@@ -275,13 +272,13 @@ static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
for (k = 0; k < attempts; k++) {
if (E1000_NVM_RW_REG_DONE &
rd32(E1000_SRWR)) {
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
break;
}
udelay(5);
}
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
hw_dbg("Shadow RAM write EEWR timed out\n");
break;
}
@@ -310,7 +307,7 @@ out:
static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
u16 *data)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
u16 i, count;
/* We cannot hold synchronization semaphores for too long,
@@ -320,7 +317,7 @@ static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
E1000_EERD_EEWR_MAX_COUNT : (words - i);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
status = igb_write_nvm_srwr(hw, offset, count,
data + i);
hw->nvm.ops.release(hw);
@@ -328,7 +325,7 @@ static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
status = E1000_ERR_SWFW_SYNC;
}
- if (status != E1000_SUCCESS)
+ if (status)
break;
}
@@ -367,12 +364,12 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
hw_dbg("Read INVM Word 0x%02x = %x\n",
address, *data);
- status = E1000_SUCCESS;
+ status = 0;
break;
}
}
}
- if (status != E1000_SUCCESS)
+ if (status)
hw_dbg("Requested word 0x%02x not found in OTP\n", address);
return status;
}
@@ -388,7 +385,7 @@ static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
u16 words __always_unused, u16 *data)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
/* Only the MAC addr is required to be present in the iNVM */
switch (offset) {
@@ -398,43 +395,44 @@ static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
&data[1]);
ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
&data[2]);
- if (ret_val != E1000_SUCCESS)
+ if (ret_val)
hw_dbg("MAC Addr not found in iNVM\n");
break;
case NVM_INIT_CTRL_2:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_INIT_CTRL_2_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_INIT_CTRL_4:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_INIT_CTRL_4_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_LED_1_CFG:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_LED_1_CFG_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_LED_0_2_CFG:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = NVM_LED_0_2_CFG_DEFAULT_I211;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
break;
case NVM_ID_LED_SETTINGS:
ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
*data = ID_LED_RESERVED_FFFF;
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
}
+ break;
case NVM_SUB_DEV_ID:
*data = hw->subsystem_device_id;
break;
@@ -488,14 +486,14 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
/* Check if we have first version location used */
if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
version = 0;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have second version location used */
else if ((i == 1) &&
((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have odd version location
@@ -506,7 +504,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
(i != 1))) {
version = (*next_record & E1000_INVM_VER_FIELD_TWO)
>> 13;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have even version location
@@ -515,12 +513,12 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
((*record & 0x3) == 0)) {
version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
}
- if (status == E1000_SUCCESS) {
+ if (!status) {
invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
>> E1000_INVM_MAJOR_SHIFT;
invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
@@ -533,7 +531,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
/* Check if we have image type in first location used */
if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
invm_ver->invm_img_type = 0;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
/* Check if we have image type in first location used */
@@ -542,7 +540,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
((((*record & 0x3) != 0) && (i != 1)))) {
invm_ver->invm_img_type =
(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
- status = E1000_SUCCESS;
+ status = 0;
break;
}
}
@@ -558,10 +556,10 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
**/
static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
/* Replace the read function with semaphore grabbing with
* the one that skips this for a while.
@@ -593,7 +591,7 @@ static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
**/
static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
u16 checksum = 0;
u16 i, nvm_data;
@@ -602,12 +600,12 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
* EEPROM read fails
*/
ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
hw_dbg("EEPROM read failed\n");
goto out;
}
- if (hw->nvm.ops.acquire(hw) == E1000_SUCCESS) {
+ if (!(hw->nvm.ops.acquire(hw))) {
/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
* because we do not want to take the synchronization
* semaphores twice here.
@@ -625,7 +623,7 @@ static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
checksum = (u16) NVM_SUM - checksum;
ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
&checksum);
- if (ret_val != E1000_SUCCESS) {
+ if (ret_val) {
hw->nvm.ops.release(hw);
hw_dbg("NVM Write Error while updating checksum.\n");
goto out;
@@ -654,7 +652,7 @@ static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
reg = rd32(E1000_EECD);
if (reg & E1000_EECD_FLUDONE_I210) {
- ret_val = E1000_SUCCESS;
+ ret_val = 0;
break;
}
udelay(5);
@@ -687,7 +685,7 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw)
**/
static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
u32 flup;
ret_val = igb_pool_flash_update_done_i210(hw);
@@ -700,7 +698,7 @@ static s32 igb_update_flash_i210(struct e1000_hw *hw)
wr32(E1000_EECD, flup);
ret_val = igb_pool_flash_update_done_i210(hw);
- if (ret_val == E1000_SUCCESS)
+ if (ret_val)
hw_dbg("Flash update complete\n");
else
hw_dbg("Flash update time out\n");
@@ -753,7 +751,7 @@ out:
static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
u8 dev_addr, u16 *data, bool read)
{
- s32 ret_val = E1000_SUCCESS;
+ s32 ret_val = 0;
ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
if (ret_val)
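
The e1000_i210.c hunks above are dominated by two mechanical conversions: the driver-local E1000_SUCCESS macro (defined as 0) becomes a literal 0, and callers test the status directly instead of comparing against the macro, the usual kernel convention for s32 status returns. A minimal sketch of the resulting pattern, with igb_do_read() as a hypothetical stand-in for the NVM read ops used above:

	s32 ret_val = igb_do_read(hw);	/* hypothetical op: returns 0 on success */

	if (ret_val)			/* nonzero means failure; no macro compare */
		return ret_val;

Note also the break added ahead of the NVM_SUB_DEV_ID case in igb_read_invm_i210(): without it, the NVM_ID_LED_SETTINGS fallback fell through and overwrote *data with the subsystem device ID.
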
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 907fe99..9f34976 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 1e0c404..2a88595 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -442,7 +439,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
* The caller must have a packed mc_addr_list of multicast addresses.
**/
void igb_update_mc_addr_list(struct e1000_hw *hw,
- u8 *mc_addr_list, u32 mc_addr_count)
+ u8 *mc_addr_list, u32 mc_addr_count)
{
u32 hash_value, hash_bit, hash_reg;
int i;
@@ -866,8 +863,7 @@ s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
goto out;
if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
- hw_dbg("Copper PHY and Auto Neg "
- "has not completed.\n");
+ hw_dbg("Copper PHY and Auto Neg has not completed.\n");
goto out;
}
@@ -1265,7 +1261,7 @@ s32 igb_get_auto_rd_done(struct e1000_hw *hw)
while (i < AUTO_READ_DONE_TIMEOUT) {
if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
break;
- msleep(1);
+ usleep_range(1000, 2000);
i++;
}
@@ -1298,7 +1294,7 @@ static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
}
if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
- switch(hw->phy.media_type) {
+ switch (hw->phy.media_type) {
case e1000_media_type_internal_serdes:
*data = ID_LED_DEFAULT_82575_SERDES;
break;
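
The e1000_mac.c hunks also convert msleep(1) to usleep_range(1000, 2000). For delays under roughly 20 ms, msleep() rounds up to jiffy granularity and can oversleep badly (on HZ=100 a 1 ms request can stretch to ~20 ms), while usleep_range() is hrtimer-based and lets the caller name an explicit window so wakeups can be coalesced; see Documentation/timers/timers-howto.txt. In sketch form:

	/* sleep at least 1 ms; the 2 ms upper bound allows wakeup coalescing */
	usleep_range(1000, 2000);

rather than msleep(1), which may sleep for a full jiffy or more.
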
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index 99299ba..ea24961b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_MAC_H_
#define _E1000_MAC_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index d5b1217..162cc49 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "e1000_mbx.h"
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index f52f551..d20af6b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_MBX_H_
#define _E1000_MBX_H_
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index 9abf829..e8280d0 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,28 +1,24 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -480,6 +476,7 @@ s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
/* Loop to allow for up to whole page write of eeprom */
while (widx < words) {
u16 word_out = data[widx];
+
word_out = (word_out >> 8) | (word_out << 8);
igb_shift_out_eec_bits(hw, word_out, 16);
widx++;
@@ -801,5 +798,4 @@ etrack_id:
fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
| eeprom_verl;
}
- return;
}
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 5b10117..febc9cd 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_NVM_H_
#define _E1000_NVM_H_
@@ -32,7 +29,7 @@ void igb_release_nvm(struct e1000_hw *hw);
s32 igb_read_mac_addr(struct e1000_hw *hw);
s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
- u32 part_num_size);
+ u32 part_num_size);
s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index 4009bba..c1bb64d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include <linux/if_ether.h>
#include <linux/delay.h>
@@ -924,8 +921,7 @@ static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
if (phy->autoneg_wait_to_complete) {
ret_val = igb_wait_autoneg(hw);
if (ret_val) {
- hw_dbg("Error while waiting for "
- "autoneg to complete\n");
+ hw_dbg("Error while waiting for autoneg to complete\n");
goto out;
}
}
@@ -2208,16 +2204,10 @@ s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
void igb_power_up_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
- u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg &= ~MII_CR_POWER_DOWN;
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg &= ~GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
}
@@ -2231,20 +2221,12 @@ void igb_power_up_phy_copper(struct e1000_hw *hw)
void igb_power_down_phy_copper(struct e1000_hw *hw)
{
u16 mii_reg = 0;
- u16 power_reg = 0;
/* The PHY will retain its settings across a power down/up cycle */
hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
mii_reg |= MII_CR_POWER_DOWN;
-
- /* i210 Phy requires an additional bit for power up/down */
- if (hw->phy.type == e1000_phy_i210) {
- hw->phy.ops.read_reg(hw, GS40G_COPPER_SPEC, &power_reg);
- power_reg |= GS40G_CS_POWER_DOWN;
- hw->phy.ops.write_reg(hw, GS40G_COPPER_SPEC, power_reg);
- }
hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
- msleep(1);
+ usleep_range(1000, 2000);
}
/**
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 4c2c36c..7af4ffa 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_PHY_H_
#define _E1000_PHY_H_
@@ -154,7 +151,6 @@ s32 igb_check_polarity_m88(struct e1000_hw *hw);
#define GS40G_MAC_LB 0x4140
#define GS40G_MAC_SPEED_1G 0X0006
#define GS40G_COPPER_SPEC 0x0010
-#define GS40G_CS_POWER_DOWN 0x0002
#define GS40G_LINE_LB 0x4000
/* SFP modules ID memory locations */
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index bdb246e..1cc4b1a7 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#ifndef _E1000_REGS_H_
#define _E1000_REGS_H_
@@ -195,6 +192,10 @@
: (0x0E038 + ((_n) * 0x40)))
#define E1000_TDWBAH(_n) ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
: (0x0E03C + ((_n) * 0x40)))
+
+#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
+#define E1000_TXPBS 0x03404 /* Tx Packet Buffer Size - RW */
+
#define E1000_TDFH 0x03410 /* TX Data FIFO Head - RW */
#define E1000_TDFT 0x03418 /* TX Data FIFO Tail - RW */
#define E1000_TDFHS 0x03420 /* TX Data FIFO Head Saved - RW */
@@ -301,9 +302,9 @@
#define E1000_RA2 0x054E0 /* 2nd half of Rx address array - RW Array */
#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x054E0 + ((_i - 16) * 8)))
+ (0x054E0 + ((_i - 16) * 8)))
#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x054E4 + ((_i - 16) * 8)))
+ (0x054E4 + ((_i - 16) * 8)))
#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
@@ -358,8 +359,7 @@
#define E1000_VMBMEM(_n) (0x00800 + (64 * (_n)))
#define E1000_VMOLR(_n) (0x05AD0 + (4 * (_n)))
#define E1000_DVMOLR(_n) (0x0C038 + (64 * (_n)))
-#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN Virtual Machine
- * Filter - RW */
+#define E1000_VLVF(_n) (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
#define E1000_VMVIR(_n) (0x03700 + (4 * (_n)))
struct e1000_hw;
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index 2713006..06102d1 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,29 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
-
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* Linux PRO/1000 Ethernet Driver main header file */
@@ -198,6 +194,7 @@ struct igb_tx_buffer {
unsigned int bytecount;
u16 gso_segs;
__be16 protocol;
+
DEFINE_DMA_UNMAP_ADDR(dma);
DEFINE_DMA_UNMAP_LEN(len);
u32 tx_flags;
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index e5570ac..c737d1f 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
/* ethtool support for igb */
@@ -144,6 +141,7 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
u32 status;
+ u32 speed;
status = rd32(E1000_STATUS);
if (hw->phy.media_type == e1000_media_type_copper) {
@@ -218,13 +216,13 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
if (status & E1000_STATUS_LU) {
if ((status & E1000_STATUS_2P5_SKU) &&
!(status & E1000_STATUS_2P5_SKU_OVER)) {
- ecmd->speed = SPEED_2500;
+ speed = SPEED_2500;
} else if (status & E1000_STATUS_SPEED_1000) {
- ecmd->speed = SPEED_1000;
+ speed = SPEED_1000;
} else if (status & E1000_STATUS_SPEED_100) {
- ecmd->speed = SPEED_100;
+ speed = SPEED_100;
} else {
- ecmd->speed = SPEED_10;
+ speed = SPEED_10;
}
if ((status & E1000_STATUS_FD) ||
hw->phy.media_type != e1000_media_type_copper)
@@ -232,9 +230,10 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ecmd->speed = -1;
- ecmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
+ ethtool_cmd_speed_set(ecmd, speed);
if ((hw->phy.media_type == e1000_media_type_fiber) ||
hw->mac.autoneg)
ecmd->autoneg = AUTONEG_ENABLE;
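
The igb_get_settings() hunk above stops assigning ecmd->speed directly. ethtool_cmd_speed_set() splits the value across the speed and speed_hi fields of struct ethtool_cmd, so link speeds that do not fit in 16 bits survive, and the raw -1 sentinels give way to the named SPEED_UNKNOWN and DUPLEX_UNKNOWN constants. A condensed sketch of the new shape:

	u32 speed = SPEED_1000;			/* example value from the status bits */

	if (!(status & E1000_STATUS_LU)) {	/* no link */
		speed = SPEED_UNKNOWN;
		ecmd->duplex = DUPLEX_UNKNOWN;
	}
	ethtool_cmd_speed_set(ecmd, speed);	/* fills speed and speed_hi */
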
@@ -286,7 +285,7 @@ static int igb_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (ecmd->autoneg == AUTONEG_ENABLE) {
hw->mac.autoneg = 1;
@@ -399,7 +398,7 @@ static int igb_set_pauseparam(struct net_device *netdev,
adapter->fc_autoneg = pause->autoneg;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (adapter->fc_autoneg == AUTONEG_ENABLE) {
hw->fc.requested_mode = e1000_fc_default;
@@ -886,7 +885,7 @@ static int igb_set_ringparam(struct net_device *netdev,
}
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
if (!netif_running(adapter->netdev)) {
for (i = 0; i < adapter->num_tx_queues; i++)
@@ -1060,8 +1059,8 @@ static struct igb_reg_test reg_test_i350[] = {
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
@@ -1103,8 +1102,8 @@ static struct igb_reg_test reg_test_82580[] = {
{ E1000_TDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_TDT(4), 0x40, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO,
0xFFFFFFFF, 0xFFFFFFFF },
@@ -1132,8 +1131,10 @@ static struct igb_reg_test reg_test_82576[] = {
{ E1000_RDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
/* Enable all RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
- { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(4), 0x40, 12, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82576, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RDT(4), 0x40, 12, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
@@ -1149,14 +1150,14 @@ static struct igb_reg_test reg_test_82576[] = {
{ E1000_TDBAH(4), 0x40, 12, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_TDLEN(4), 0x40, 12, PATTERN_TEST, 0x000FFFF0, 0x000FFFFF },
{ E1000_RCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
- { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0x003FFFFB },
+ { E1000_RCTL, 0x100, 1, SET_READ_TEST, 0x04CFB0FE, 0xFFFFFFFF },
{ E1000_TCTL, 0x100, 1, SET_READ_TEST, 0xFFFFFFFF, 0x00000000 },
{ E1000_RA, 0, 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA, 0, 16, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RA2, 0, 8, TABLE64_TEST_HI, 0x83FFFFFF, 0xFFFFFFFF },
- { E1000_MTA, 0, 128,TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
+ { E1000_MTA, 0, 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ 0, 0, 0, 0 }
};
@@ -1170,7 +1171,8 @@ static struct igb_reg_test reg_test_82575[] = {
{ E1000_RDBAH(0), 0x100, 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
{ E1000_RDLEN(0), 0x100, 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
/* Enable all four RX queues before testing. */
- { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, E1000_RXDCTL_QUEUE_ENABLE },
+ { E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0,
+ E1000_RXDCTL_QUEUE_ENABLE },
/* RDH is read-only for 82575, only test RDT. */
{ E1000_RDT(0), 0x100, 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
{ E1000_RXDCTL(0), 0x100, 4, WRITE_NO_TEST, 0, 0 },
@@ -1196,8 +1198,8 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 pat, val;
- static const u32 _test[] =
- {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+ static const u32 _test[] = {
+ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
wr32(reg, (_test[pat] & write));
val = rd32(reg) & mask;
@@ -1206,11 +1208,11 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
"pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
reg, val, (_test[pat] & write & mask));
*data = reg;
- return 1;
+ return true;
}
}
- return 0;
+ return false;
}
static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
@@ -1218,17 +1220,18 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
{
struct e1000_hw *hw = &adapter->hw;
u32 val;
+
wr32(reg, write & mask);
val = rd32(reg);
if ((write & mask) != (val & mask)) {
dev_err(&adapter->pdev->dev,
- "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", reg,
- (val & mask), (write & mask));
+ "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), (write & mask));
*data = reg;
- return 1;
+ return true;
}
- return 0;
+ return false;
}
#define REG_PATTERN_TEST(reg, mask, write) \
@@ -1387,14 +1390,14 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Hook up test interrupt handler just for this test */
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
if (request_irq(adapter->msix_entries[0].vector,
- igb_test_intr, 0, netdev->name, adapter)) {
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
} else if (adapter->flags & IGB_FLAG_HAS_MSI) {
shared_int = false;
if (request_irq(irq,
- igb_test_intr, 0, netdev->name, adapter)) {
+ igb_test_intr, 0, netdev->name, adapter)) {
*data = 1;
return -1;
}
@@ -1412,7 +1415,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Define all writable bits for ICS */
switch (hw->mac.type) {
@@ -1459,7 +1462,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, mask);
wr32(E1000_ICS, mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 3;
@@ -1481,7 +1484,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMS, mask);
wr32(E1000_ICS, mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (!(adapter->test_icr & mask)) {
*data = 4;
@@ -1503,7 +1506,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
wr32(E1000_IMC, ~mask);
wr32(E1000_ICS, ~mask);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
if (adapter->test_icr & mask) {
*data = 5;
@@ -1515,7 +1518,7 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
/* Disable all the interrupts */
wr32(E1000_IMC, ~0);
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
/* Unhook test interrupt handler */
if (adapter->flags & IGB_FLAG_HAS_MSIX)
@@ -1664,8 +1667,8 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
(hw->device_id == E1000_DEV_ID_DH89XXCC_SERDES) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_BACKPLANE) ||
(hw->device_id == E1000_DEV_ID_DH89XXCC_SFP) ||
- (hw->device_id == E1000_DEV_ID_I354_SGMII)) {
-
+ (hw->device_id == E1000_DEV_ID_I354_SGMII) ||
+ (hw->device_id == E1000_DEV_ID_I354_BACKPLANE_2_5GBPS)) {
/* Enable DH89xxCC MPHY for near end loopback */
reg = rd32(E1000_MPHY_ADDR_CTL);
reg = (reg & E1000_MPHY_ADDR_CTL_OFFSET_MASK) |
@@ -1949,6 +1952,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
*data = 0;
if (hw->phy.media_type == e1000_media_type_internal_serdes) {
int i = 0;
+
hw->mac.serdes_has_link = false;
/* On some blade server designs, link establishment
@@ -2413,9 +2417,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V4_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2425,9 +2431,11 @@ static int igb_get_rss_hash_opts(struct igb_adapter *adapter,
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case UDP_V6_FLOW:
if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* Fall through */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
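The new /* Fall through */ comments mark these cascades as intentional: a TCP flow should accumulate its L4 flags and then drop into the UDP and IP cases below it. A self-contained sketch of the idiom, with illustrative values:

/* Each case adds its own bits and then deliberately continues into the
 * next; the comment tells reviewers and static checkers the missing
 * break is on purpose.
 */
unsigned int hash_fields(int flow_type)
{
	unsigned int data = 0;

	switch (flow_type) {
	case 0:			/* TCP-like: hash on L4 ports too */
		data |= 0x3;
		/* Fall through */
	case 1:			/* UDP-like: hash on IP addresses */
		data |= 0xc;
		break;
	default:
		break;
	}
	return data;
}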
@@ -2730,7 +2738,7 @@ static int igb_get_module_info(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 status = E1000_SUCCESS;
+ u32 status = 0;
u16 sff8472_rev, addr_mode;
bool page_swap = false;
@@ -2740,12 +2748,12 @@ static int igb_get_module_info(struct net_device *netdev,
/* Check whether we support SFF-8472 or not */
status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_COMP, &sff8472_rev);
- if (status != E1000_SUCCESS)
+ if (status)
return -EIO;
/* addressing mode is not supported */
status = igb_read_phy_reg_i2c(hw, IGB_SFF_8472_SWAP, &addr_mode);
- if (status != E1000_SUCCESS)
+ if (status)
return -EIO;
/* addressing mode is not supported */
@@ -2772,7 +2780,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- u32 status = E1000_SUCCESS;
+ u32 status = 0;
u16 *dataword;
u16 first_word, last_word;
int i = 0;
@@ -2791,7 +2799,7 @@ static int igb_get_module_eeprom(struct net_device *netdev,
/* Read EEPROM block, SFF-8079/SFF-8472, word at a time */
for (i = 0; i < last_word - first_word + 1; i++) {
status = igb_read_phy_reg_i2c(hw, first_word + i, &dataword[i]);
- if (status != E1000_SUCCESS) {
+ if (status) {
/* Error occurred while reading module */
kfree(dataword);
return -EIO;
@@ -2824,7 +2832,7 @@ static u32 igb_get_rxfh_indir_size(struct net_device *netdev)
return IGB_RETA_SIZE;
}
-static int igb_get_rxfh_indir(struct net_device *netdev, u32 *indir)
+static int igb_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
{
struct igb_adapter *adapter = netdev_priv(netdev);
int i;
@@ -2870,7 +2878,8 @@ void igb_write_rss_indir_tbl(struct igb_adapter *adapter)
}
}
-static int igb_set_rxfh_indir(struct net_device *netdev, const u32 *indir)
+static int igb_set_rxfh(struct net_device *netdev, const u32 *indir,
+ const u8 *key)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
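The rename from {get,set}_rxfh_indir to {get,set}_rxfh tracks the ethtool interface consolidation in this series: the new callbacks carry the RSS hash key alongside the indirection table, so drivers gain key support without another pair of hooks. A sketch of the widened contract (the example_* names are illustrative, not from the driver):

static int example_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
{
	if (indir)
		example_fill_indir_table(netdev, indir); /* hypothetical helper */
	/* key is left untouched: this hardware has no readable RSS key */
	return 0;
}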
@@ -3019,8 +3028,8 @@ static const struct ethtool_ops igb_ethtool_ops = {
.get_module_info = igb_get_module_info,
.get_module_eeprom = igb_get_module_eeprom,
.get_rxfh_indir_size = igb_get_rxfh_indir_size,
- .get_rxfh_indir = igb_get_rxfh_indir,
- .set_rxfh_indir = igb_set_rxfh_indir,
+ .get_rxfh = igb_get_rxfh,
+ .set_rxfh = igb_set_rxfh,
.get_channels = igb_get_channels,
.set_channels = igb_set_channels,
.begin = igb_ethtool_begin,
@@ -3029,5 +3038,5 @@ static const struct ethtool_ops igb_ethtool_ops = {
void igb_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &igb_ethtool_ops);
+ netdev->ethtool_ops = &igb_ethtool_ops;
}
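SET_ETHTOOL_OPS() added nothing over a plain store; the macro expanded to roughly ((netdev)->ethtool_ops = (ops)) and was on its way out of the tree, so drivers now open-code the assignment. A minimal sketch with illustrative names:

static const struct ethtool_ops example_ethtool_ops = {
	.get_link = ethtool_op_get_link,	/* generic helper */
};

static void example_set_ethtool_ops(struct net_device *netdev)
{
	/* direct assignment; the removed macro expanded to exactly this */
	netdev->ethtool_ops = &example_ethtool_ops;
}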
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index 8333f67..44b6a68 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#include "igb.h"
#include "e1000_82575.h"
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 16430a8..f145adb 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,28 +1,25 @@
-/*******************************************************************************
-
- Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2014 Intel Corporation.
-
- This program is free software; you can redistribute it and/or modify it
- under the terms and conditions of the GNU General Public License,
- version 2, as published by the Free Software Foundation.
-
- This program is distributed in the hope it will be useful, but WITHOUT
- ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- more details.
-
- You should have received a copy of the GNU General Public License along with
- this program; if not, see <http://www.gnu.org/licenses/>.
-
- The full GNU General Public License is included in this distribution in
- the file called "COPYING".
-
- Contact Information:
- e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
- Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
-
-*******************************************************************************/
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -75,7 +72,7 @@ static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
};
-static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
+static const struct pci_device_id igb_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
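DEFINE_PCI_DEVICE_TABLE() is deprecated; by this point it hid nothing more than the type, expanding to roughly const struct pci_device_id _table[], so writing the declaration out keeps it greppable and checkpatch-clean. A sketch with an illustrative device ID:

static const struct pci_device_id example_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, 0x1533) },	/* illustrative device ID */
	{ 0, }				/* zero terminator required */
};
MODULE_DEVICE_TABLE(pci, example_pci_tbl);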
@@ -117,7 +114,6 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
-void igb_reset(struct igb_adapter *);
static int igb_setup_all_tx_resources(struct igb_adapter *);
static int igb_setup_all_rx_resources(struct igb_adapter *);
static void igb_free_all_tx_resources(struct igb_adapter *);
@@ -141,7 +137,7 @@ static void igb_watchdog(unsigned long);
static void igb_watchdog_task(struct work_struct *);
static netdev_tx_t igb_xmit_frame(struct sk_buff *skb, struct net_device *);
static struct rtnl_link_stats64 *igb_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *stats);
+ struct rtnl_link_stats64 *stats);
static int igb_change_mtu(struct net_device *, int);
static int igb_set_mac(struct net_device *, void *);
static void igb_set_uta(struct igb_adapter *adapter);
@@ -159,7 +155,8 @@ static bool igb_clean_rx_irq(struct igb_q_vector *, int);
static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
static void igb_tx_timeout(struct net_device *);
static void igb_reset_task(struct work_struct *);
-static void igb_vlan_mode(struct net_device *netdev, netdev_features_t features);
+static void igb_vlan_mode(struct net_device *netdev,
+ netdev_features_t features);
static int igb_vlan_rx_add_vid(struct net_device *, __be16, u16);
static int igb_vlan_rx_kill_vid(struct net_device *, __be16, u16);
static void igb_restore_vlan(struct igb_adapter *);
@@ -172,7 +169,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac);
static int igb_ndo_set_vf_vlan(struct net_device *netdev,
int vf, u16 vlan, u8 qos);
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+static int igb_ndo_set_vf_bw(struct net_device *, int, int, int);
static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
bool setting);
static int igb_ndo_get_vf_config(struct net_device *netdev, int vf,
@@ -215,10 +212,9 @@ static struct notifier_block dca_notifier = {
static void igb_netpoll(struct net_device *);
#endif
#ifdef CONFIG_PCI_IOV
-static unsigned int max_vfs = 0;
+static unsigned int max_vfs;
module_param(max_vfs, uint, 0);
-MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate "
- "per physical function");
+MODULE_PARM_DESC(max_vfs, "Maximum number of virtual functions to allocate per physical function");
#endif /* CONFIG_PCI_IOV */
static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
@@ -384,8 +380,7 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start "
- "last_rx\n");
+ pr_info("Device Name state trans_start last_rx\n");
pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
netdev->state, netdev->trans_start, netdev->last_rx);
}
@@ -438,9 +433,7 @@ static void igb_dump(struct igb_adapter *adapter)
pr_info("------------------------------------\n");
pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] "
- "[bi->dma ] leng ntw timestamp "
- "bi->skb\n");
+ pr_info("T [desc] [address 63:0 ] [PlPOCIStDDM Ln] [bi->dma ] leng ntw timestamp bi->skb\n");
for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
const char *next_desc;
@@ -458,9 +451,8 @@ static void igb_dump(struct igb_adapter *adapter)
else
next_desc = "";
- pr_info("T [0x%03X] %016llX %016llX %016llX"
- " %04X %p %016llX %p%s\n", i,
- le64_to_cpu(u0->a),
+ pr_info("T [0x%03X] %016llX %016llX %016llX %04X %p %016llX %p%s\n",
+ i, le64_to_cpu(u0->a),
le64_to_cpu(u0->b),
(u64)dma_unmap_addr(buffer_info, dma),
dma_unmap_len(buffer_info, len),
@@ -519,10 +511,8 @@ rx_ring_summary:
pr_info("------------------------------------\n");
pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
pr_info("------------------------------------\n");
- pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] "
- "[bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
- pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] -----"
- "----------- [bi->skb] <-- Adv Rx Write-Back format\n");
+ pr_info("R [desc] [ PktBuf A0] [ HeadBuf DD] [bi->dma ] [bi->skb] <-- Adv Rx Read format\n");
+ pr_info("RWB[desc] [PcsmIpSHl PtRs] [vl er S cks ln] ---------------- [bi->skb] <-- Adv Rx Write-Back format\n");
for (i = 0; i < rx_ring->count; i++) {
const char *next_desc;
@@ -584,7 +574,7 @@ static int igb_get_i2c_data(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- return ((i2cctl & E1000_I2C_DATA_IN) != 0);
+ return !!(i2cctl & E1000_I2C_DATA_IN);
}
/**
@@ -648,7 +638,7 @@ static int igb_get_i2c_clk(void *data)
struct e1000_hw *hw = &adapter->hw;
s32 i2cctl = rd32(E1000_I2CPARAMS);
- return ((i2cctl & E1000_I2C_CLK_IN) != 0);
+ return !!(i2cctl & E1000_I2C_CLK_IN);
}
static const struct i2c_algo_bit_data igb_i2c_algo = {
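The !!(...) form in the two I2C getters normalizes a bit test to exactly 0 or 1, rather than relying on an implicit != 0 comparison. A standalone illustration:

/* Any set bit in (word & mask) collapses to 1; zero stays 0. This also
 * protects against truncation when the masked bit lies above bit 31 and
 * the result is narrowed to a smaller type.
 */
static inline int bit_is_set(unsigned long word, unsigned long mask)
{
	return !!(word & mask);
}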
@@ -681,9 +671,9 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
static int __init igb_init_module(void)
{
int ret;
+
pr_info("%s - version %s\n",
igb_driver_string, igb_driver_version);
-
pr_info("%s\n", igb_copyright);
#ifdef CONFIG_IGB_DCA
@@ -736,12 +726,14 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
adapter->rx_ring[i]->reg_idx = rbase_offset +
Q_IDX_82576(i);
}
+ /* Fall through */
case e1000_82575:
case e1000_82580:
case e1000_i350:
case e1000_i354:
case e1000_i210:
case e1000_i211:
+ /* Fall through */
default:
for (; i < adapter->num_rx_queues; i++)
adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@ -1292,8 +1284,7 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
if (adapter->hw.mac.type >= e1000_82576)
set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
- /*
- * On i350, i354, i210, and i211, loopback VLAN packets
+ /* On i350, i354, i210, and i211, loopback VLAN packets
* have the tag byte-swapped.
*/
if (adapter->hw.mac.type >= e1000_i350)
@@ -1345,6 +1336,7 @@ static int igb_alloc_q_vectors(struct igb_adapter *adapter)
for (; v_idx < q_vectors; v_idx++) {
int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
+
err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
tqpv, txr_idx, rqpv, rxr_idx);
@@ -1484,6 +1476,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
*/
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 regval = rd32(E1000_EIAM);
+
wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
wr32(E1000_EIMC, adapter->eims_enable_mask);
regval = rd32(E1000_EIAC);
@@ -1495,6 +1488,7 @@ static void igb_irq_disable(struct igb_adapter *adapter)
wrfl();
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
int i;
+
for (i = 0; i < adapter->num_q_vectors; i++)
synchronize_irq(adapter->msix_entries[i].vector);
} else {
@@ -1513,6 +1507,7 @@ static void igb_irq_enable(struct igb_adapter *adapter)
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
u32 regval = rd32(E1000_EIAC);
+
wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
regval = rd32(E1000_EIAM);
wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
@@ -1745,6 +1740,7 @@ int igb_up(struct igb_adapter *adapter)
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
u32 reg_data = rd32(E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
wr32(E1000_CTRL_EXT, reg_data);
}
@@ -1787,7 +1783,7 @@ void igb_down(struct igb_adapter *adapter)
wr32(E1000_TCTL, tctl);
/* flush both disables and wait for them to finish */
wrfl();
- msleep(10);
+ usleep_range(10000, 11000);
igb_irq_disable(adapter);
@@ -1827,7 +1823,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
{
WARN_ON(in_interrupt());
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
igb_down(adapter);
igb_up(adapter);
clear_bit(__IGB_RESETTING, &adapter->state);
@@ -1960,6 +1956,7 @@ void igb_reset(struct igb_adapter *adapter)
/* disable receive for all VFs and wait one second */
if (adapter->vfs_allocated_count) {
int i;
+
for (i = 0 ; i < adapter->vfs_allocated_count; i++)
adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
@@ -2087,7 +2084,7 @@ static const struct net_device_ops igb_netdev_ops = {
.ndo_vlan_rx_kill_vid = igb_vlan_rx_kill_vid,
.ndo_set_vf_mac = igb_ndo_set_vf_mac,
.ndo_set_vf_vlan = igb_ndo_set_vf_vlan,
- .ndo_set_vf_tx_rate = igb_ndo_set_vf_bw,
+ .ndo_set_vf_rate = igb_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = igb_ndo_set_vf_spoofchk,
.ndo_get_vf_config = igb_ndo_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -2142,7 +2139,6 @@ void igb_set_fw_version(struct igb_adapter *adapter)
}
break;
}
- return;
}
/**
@@ -2203,11 +2199,11 @@ static void igb_init_mas(struct igb_adapter *adapter)
**/
static s32 igb_init_i2c(struct igb_adapter *adapter)
{
- s32 status = E1000_SUCCESS;
+ s32 status = 0;
/* I2C interface supported on i350 devices */
if (adapter->hw.mac.type != e1000_i350)
- return E1000_SUCCESS;
+ return 0;
/* Initialize the i2c bus which is controlled by the registers.
* This bus will use the i2c_algo_bit structure that implements
@@ -2437,6 +2433,12 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
/* get firmware version for ethtool -i */
igb_set_fw_version(adapter);
+ /* configure RXPBSIZE and TXPBSIZE */
+ if (hw->mac.type == e1000_i210) {
+ wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
+ wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
+ }
+
setup_timer(&adapter->watchdog_timer, igb_watchdog,
(unsigned long) adapter);
setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
@@ -2529,7 +2531,8 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
/* let the f/w know that the h/w is now under the control of the
- * driver. */
+ * driver.
+ */
igb_get_hw_control(adapter);
strcpy(netdev->name, "eth%d");
@@ -3077,6 +3080,7 @@ static int __igb_open(struct net_device *netdev, bool resuming)
/* notify VFs that reset has been completed */
if (adapter->vfs_allocated_count) {
u32 reg_data = rd32(E1000_CTRL_EXT);
+
reg_data |= E1000_CTRL_EXT_PFRSTD;
wr32(E1000_CTRL_EXT, reg_data);
}
@@ -3248,7 +3252,7 @@ void igb_setup_tctl(struct igb_adapter *adapter)
* Configure a transmit ring after a reset.
**/
void igb_configure_tx_ring(struct igb_adapter *adapter,
- struct igb_ring *ring)
+ struct igb_ring *ring)
{
struct e1000_hw *hw = &adapter->hw;
u32 txdctl = 0;
@@ -3389,7 +3393,8 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (adapter->rss_indir_tbl_init != num_rx_queues) {
for (j = 0; j < IGB_RETA_SIZE; j++)
- adapter->rss_indir_tbl[j] = (j * num_rx_queues) / IGB_RETA_SIZE;
+ adapter->rss_indir_tbl[j] =
+ (j * num_rx_queues) / IGB_RETA_SIZE;
adapter->rss_indir_tbl_init = num_rx_queues;
}
igb_write_rss_indir_tbl(adapter);
@@ -3430,6 +3435,7 @@ static void igb_setup_mrqc(struct igb_adapter *adapter)
if (hw->mac.type > e1000_82575) {
/* Set the default pool for the PF's first queue */
u32 vtctl = rd32(E1000_VT_CTL);
+
vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
E1000_VT_CTL_DISABLE_DEF_POOL);
vtctl |= adapter->vfs_allocated_count <<
@@ -3511,7 +3517,7 @@ void igb_setup_rctl(struct igb_adapter *adapter)
}
static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
- int vfn)
+ int vfn)
{
struct e1000_hw *hw = &adapter->hw;
u32 vmolr;
@@ -4058,7 +4064,8 @@ static void igb_check_wvbr(struct igb_adapter *adapter)
switch (hw->mac.type) {
case e1000_82576:
case e1000_i350:
- if (!(wvbr = rd32(E1000_WVBR)))
+ wvbr = rd32(E1000_WVBR);
+ if (!wvbr)
return;
break;
default:
@@ -4077,7 +4084,7 @@ static void igb_spoof_check(struct igb_adapter *adapter)
if (!adapter->wvbr)
return;
- for(j = 0; j < adapter->vfs_allocated_count; j++) {
+ for (j = 0; j < adapter->vfs_allocated_count; j++) {
if (adapter->wvbr & (1 << j) ||
adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
dev_warn(&adapter->pdev->dev,
@@ -4209,14 +4216,15 @@ static void igb_watchdog_task(struct work_struct *work)
if (!netif_carrier_ok(netdev)) {
u32 ctrl;
+
hw->mac.ops.get_speed_and_duplex(hw,
&adapter->link_speed,
&adapter->link_duplex);
ctrl = rd32(E1000_CTRL);
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s "
- "Duplex, Flow Control: %s\n",
+ netdev_info(netdev,
+ "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
netdev->name,
adapter->link_speed,
adapter->link_duplex == FULL_DUPLEX ?
@@ -4242,11 +4250,8 @@ static void igb_watchdog_task(struct work_struct *work)
/* check for thermal sensor event */
if (igb_thermal_sensor_event(hw,
- E1000_THSTAT_LINK_THROTTLE)) {
- netdev_info(netdev, "The network adapter link "
- "speed was downshifted because it "
- "overheated\n");
- }
+ E1000_THSTAT_LINK_THROTTLE))
+ netdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
/* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
@@ -4277,12 +4282,11 @@ static void igb_watchdog_task(struct work_struct *work)
/* check for thermal sensor event */
if (igb_thermal_sensor_event(hw,
E1000_THSTAT_PWR_DOWN)) {
- netdev_err(netdev, "The network adapter was "
- "stopped because it overheated\n");
+ netdev_err(netdev, "The network adapter was stopped because it overheated\n");
}
/* Links status message must follow this format */
- printk(KERN_INFO "igb: %s NIC Link is Down\n",
+ netdev_info(netdev, "igb: %s NIC Link is Down\n",
netdev->name);
netif_carrier_off(netdev);
@@ -4344,6 +4348,7 @@ static void igb_watchdog_task(struct work_struct *work)
/* Cause software interrupt to ensure Rx ring is cleaned */
if (adapter->flags & IGB_FLAG_HAS_MSIX) {
u32 eics = 0;
+
for (i = 0; i < adapter->num_q_vectors; i++)
eics |= adapter->q_vector[i]->eims_value;
wr32(E1000_EICS, eics);
@@ -4483,13 +4488,12 @@ static void igb_update_itr(struct igb_q_vector *q_vector,
case low_latency: /* 50 usec aka 20000 ints/s */
if (bytes > 10000) {
/* this if handles the TSO accounting */
- if (bytes/packets > 8000) {
+ if (bytes/packets > 8000)
itrval = bulk_latency;
- } else if ((packets < 10) || ((bytes/packets) > 1200)) {
+ else if ((packets < 10) || ((bytes/packets) > 1200))
itrval = bulk_latency;
- } else if ((packets > 35)) {
+ else if ((packets > 35))
itrval = lowest_latency;
- }
} else if (bytes/packets > 2000) {
itrval = bulk_latency;
} else if (packets <= 2 && bytes < 512) {
@@ -4675,6 +4679,7 @@ static void igb_tx_csum(struct igb_ring *tx_ring, struct igb_tx_buffer *first)
return;
} else {
u8 l4_hdr = 0;
+
switch (first->protocol) {
case htons(ETH_P_IP):
vlan_macip_lens |= skb_network_header_len(skb);
@@ -4962,6 +4967,7 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb,
*/
if (NETDEV_FRAG_PAGE_MAX_SIZE > IGB_MAX_DATA_PER_TXD) {
unsigned short f;
+
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
} else {
@@ -5140,7 +5146,7 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
max_frame = ETH_FRAME_LEN + ETH_FCS_LEN;
while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
- msleep(1);
+ usleep_range(1000, 2000);
/* igb_down has a dependency on max_frame_size */
adapter->max_frame_size = max_frame;
@@ -5621,6 +5627,7 @@ static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
vmolr |= E1000_VMOLR_MPME;
} else if (vf_data->num_vf_mc_hashes) {
int j;
+
vmolr |= E1000_VMOLR_ROMPE;
for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
@@ -5672,6 +5679,7 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
for (i = 0; i < adapter->vfs_allocated_count; i++) {
u32 vmolr = rd32(E1000_VMOLR(i));
+
vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
vf_data = &adapter->vf_data[i];
@@ -5770,6 +5778,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
+
reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size += 4;
@@ -5798,6 +5807,7 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
adapter->vf_data[vf].vlans_enabled--;
if (!adapter->vf_data[vf].vlans_enabled) {
u32 size;
+
reg = rd32(E1000_VMOLR(vf));
size = reg & E1000_VMOLR_RLPML_MASK;
size -= 4;
@@ -5902,8 +5912,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
*/
if (!add && (adapter->netdev->flags & IFF_PROMISC)) {
u32 vlvf, bits;
-
int regndx = igb_find_vlvf_entry(adapter, vid);
+
if (regndx < 0)
goto out;
/* See if any other pools are set for this VLAN filter
@@ -6494,7 +6504,7 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
/* transfer page from old buffer to new buffer */
- memcpy(new_buff, old_buff, sizeof(struct igb_rx_buffer));
+ *new_buff = *old_buff;
/* sync the buffer for use by the device */
dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
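Replacing memcpy() with structure assignment above is behavior-neutral for a fixed-size struct, but it lets the compiler type-check both operands and choose the copy strategy itself. A standalone sketch with an illustrative struct:

struct buf_info {
	void *page;
	unsigned long dma;
	unsigned int offset;
};

static void reuse_buf_info(struct buf_info *new_buf,
			   const struct buf_info *old_buf)
{
	*new_buf = *old_buf;	/* was: memcpy(new_buf, old_buf, sizeof(*new_buf)) */
}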
@@ -6963,6 +6973,7 @@ static void igb_process_skb_fields(struct igb_ring *rx_ring,
if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
igb_test_staterr(rx_desc, E1000_RXD_STAT_VP)) {
u16 vid;
+
if (igb_test_staterr(rx_desc, E1000_RXDEXT_STATERR_LB) &&
test_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &rx_ring->flags))
vid = be16_to_cpu(rx_desc->wb.upper.vlan);
@@ -7051,7 +7062,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
if (cleaned_count)
igb_alloc_rx_buffers(rx_ring, cleaned_count);
- return (total_packets < budget);
+ return total_packets < budget;
}
static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
@@ -7172,7 +7183,7 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
break;
case SIOCGMIIREG:
if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
- &data->val_out))
+ &data->val_out))
return -EIO;
break;
case SIOCSMIIREG:
@@ -7873,7 +7884,8 @@ static void igb_check_vf_rate_limit(struct igb_adapter *adapter)
}
}
-static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
@@ -7882,15 +7894,19 @@ static int igb_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
if (hw->mac.type != e1000_82576)
return -EOPNOTSUPP;
+ if (min_tx_rate)
+ return -EINVAL;
+
actual_link_speed = igb_link_mbps(adapter->link_speed);
if ((vf >= adapter->vfs_allocated_count) ||
(!(rd32(E1000_STATUS) & E1000_STATUS_LU)) ||
- (tx_rate < 0) || (tx_rate > actual_link_speed))
+ (max_tx_rate < 0) ||
+ (max_tx_rate > actual_link_speed))
return -EINVAL;
adapter->vf_rate_link_speed = actual_link_speed;
- adapter->vf_data[vf].tx_rate = (u16)tx_rate;
- igb_set_vf_rate_limit(hw, vf, tx_rate, actual_link_speed);
+ adapter->vf_data[vf].tx_rate = (u16)max_tx_rate;
+ igb_set_vf_rate_limit(hw, vf, max_tx_rate, actual_link_speed);
return 0;
}
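The .ndo_set_vf_rate callback supersedes .ndo_set_vf_tx_rate by taking both a minimum and a maximum TX rate in Mbps; hardware like this, which can only cap bandwidth, must refuse any non-zero minimum. A sketch of the contract (the helper name is hypothetical):

static int example_set_vf_rate(struct net_device *dev, int vf,
			       int min_tx_rate, int max_tx_rate)
{
	if (min_tx_rate)
		return -EINVAL;	/* no minimum-rate guarantee available */

	return example_apply_vf_cap(dev, vf, max_tx_rate); /* hypothetical */
}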
@@ -7919,7 +7935,7 @@ static int igb_ndo_set_vf_spoofchk(struct net_device *netdev, int vf,
wr32(reg_offset, reg_val);
adapter->vf_data[vf].spoofchk_enabled = setting;
- return E1000_SUCCESS;
+ return 0;
}
static int igb_ndo_get_vf_config(struct net_device *netdev,
@@ -7930,7 +7946,8 @@ static int igb_ndo_get_vf_config(struct net_device *netdev,
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vf_data[vf].vf_mac_addresses, ETH_ALEN);
- ivi->tx_rate = adapter->vf_data[vf].tx_rate;
+ ivi->max_tx_rate = adapter->vf_data[vf].tx_rate;
+ ivi->min_tx_rate = 0;
ivi->vlan = adapter->vf_data[vf].pf_vlan;
ivi->qos = adapter->vf_data[vf].pf_qos;
ivi->spoofchk = adapter->vf_data[vf].spoofchk_enabled;
@@ -7955,11 +7972,13 @@ static void igb_vmm_control(struct igb_adapter *adapter)
reg = rd32(E1000_DTXCTL);
reg |= E1000_DTXCTL_VLAN_ADDED;
wr32(E1000_DTXCTL, reg);
+ /* Fall through */
case e1000_82580:
/* enable replication vlan tag stripping */
reg = rd32(E1000_RPLOLR);
reg |= E1000_RPLOLR_STRVLAN;
wr32(E1000_RPLOLR, reg);
+ /* Fall through */
case e1000_i350:
/* none of the above registers are supported by i350 */
break;
@@ -8049,6 +8068,7 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
} /* endif adapter->dmac is not disabled */
} else if (hw->mac.type == e1000_82580) {
u32 reg = rd32(E1000_PCIEMISC);
+
wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
wr32(E1000_DMACR, 0);
}
@@ -8077,8 +8097,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
swfw_mask = E1000_SWFW_PHY0_SM;
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)
- != E1000_SUCCESS)
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return E1000_ERR_SWFW_SYNC;
status = i2c_smbus_read_byte_data(this_client, byte_offset);
@@ -8088,7 +8107,7 @@ s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
return E1000_ERR_I2C;
else {
*data = status;
- return E1000_SUCCESS;
+ return 0;
}
}
@@ -8113,7 +8132,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
if (!this_client)
return E1000_ERR_I2C;
- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != E1000_SUCCESS)
+ if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
return E1000_ERR_SWFW_SYNC;
status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
hw->mac.ops.release_swfw_sync(hw, swfw_mask);
@@ -8121,7 +8140,7 @@ s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
if (status)
return E1000_ERR_I2C;
else
- return E1000_SUCCESS;
+ return 0;
}
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index ab25e49..794c139 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -360,8 +360,8 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
return 0;
}
-static int igb_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
{
return -EOPNOTSUPP;
}
@@ -559,10 +559,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
-EFAULT : 0;
}
+
/**
- * igb_ptp_set_ts_config - control hardware time stamping
- * @netdev:
- * @ifreq:
+ * igb_ptp_set_timestamp_mode - setup hardware for timestamping
+ * @adapter: networking device structure
+ * @config: hwtstamp configuration
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -575,12 +576,11 @@ int igb_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr)
* type has to be specified. Matching the kind of event packet is
* not supported, with the exception of "all V2 events regardless of
* level 2 or 4".
- **/
-int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+ */
+static int igb_ptp_set_timestamp_mode(struct igb_adapter *adapter,
+ struct hwtstamp_config *config)
{
- struct igb_adapter *adapter = netdev_priv(netdev);
struct e1000_hw *hw = &adapter->hw;
- struct hwtstamp_config *config = &adapter->tstamp_config;
u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
u32 tsync_rx_cfg = 0;
@@ -588,9 +588,6 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
bool is_l2 = false;
u32 regval;
- if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
- return -EFAULT;
-
/* reserved for future extensions */
if (config->flags)
return -EINVAL;
@@ -725,7 +722,33 @@ int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
regval = rd32(E1000_RXSTMPL);
regval = rd32(E1000_RXSTMPH);
- return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+ return 0;
+}
+
+/**
+ * igb_ptp_set_ts_config - set hardware time stamping config
+ * @netdev: network interface device structure
+ * @ifr: interface request carrying the user's hwtstamp_config
+ *
+ **/
+int igb_ptp_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
+{
+ struct igb_adapter *adapter = netdev_priv(netdev);
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = igb_ptp_set_timestamp_mode(adapter, &config);
+ if (err)
+ return err;
+
+ /* save these settings for future reference */
+ memcpy(&adapter->tstamp_config, &config,
+ sizeof(adapter->tstamp_config));
+
+ return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
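The ioctl split above follows a copy-in / apply / persist / echo-back shape: the user's hwtstamp_config is validated against the hardware first, and only a successful apply updates the saved adapter state, so a rejected request never clobbers a working configuration. A sketch, with the apply and save helpers standing in for the driver-specific steps:

int example_set_ts_config(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = example_apply_timestamp_mode(netdev, &config); /* hypothetical */
	if (err)
		return err;

	example_save_ts_config(netdev, &config);	/* hypothetical */

	/* echo back: the mode may have been adjusted (e.g. filter widened) */
	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}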
@@ -745,7 +768,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
adapter->ptp_caps.settime = igb_ptp_settime_82576;
- adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82576;
adapter->cc.mask = CLOCKSOURCE_MASK(64);
adapter->cc.mult = 1;
@@ -765,7 +788,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576;
adapter->ptp_caps.gettime = igb_ptp_gettime_82576;
adapter->ptp_caps.settime = igb_ptp_settime_82576;
- adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable;
adapter->cc.read = igb_ptp_read_82580;
adapter->cc.mask = CLOCKSOURCE_MASK(IGB_NBITS_82580);
adapter->cc.mult = 1;
@@ -784,7 +807,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
adapter->ptp_caps.settime = igb_ptp_settime_i210;
- adapter->ptp_caps.enable = igb_ptp_enable;
+ adapter->ptp_caps.enable = igb_ptp_feature_enable;
/* Enable the timer functions by clearing bit 31. */
wr32(E1000_TSAUXC, 0x0);
break;
@@ -820,6 +843,9 @@ void igb_ptp_init(struct igb_adapter *adapter)
wr32(E1000_IMS, E1000_IMS_TS);
}
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
@@ -884,7 +910,7 @@ void igb_ptp_reset(struct igb_adapter *adapter)
return;
/* reset the tstamp_config */
- memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+ igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
switch (adapter->hw.mac.type) {
case e1000_82576:
diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c
index 90eef07..2178f87 100644
--- a/drivers/net/ethernet/intel/igbvf/ethtool.c
+++ b/drivers/net/ethernet/intel/igbvf/ethtool.c
@@ -101,8 +101,8 @@ static int igbvf_get_settings(struct net_device *netdev,
else
ecmd->duplex = DUPLEX_HALF;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_DISABLE;
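Storing -1 into the u32 speed field and the u8 duplex field relied on truncation; the ethtool UAPI provides named sentinels with the same bit patterns, which state the intent explicitly:

#define SPEED_UNKNOWN	-1	/* all-ones in the 32-bit speed field */
#define DUPLEX_UNKNOWN	0xff	/* all-ones in the 8-bit duplex field */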
@@ -119,7 +119,6 @@ static int igbvf_set_settings(struct net_device *netdev,
static void igbvf_get_pauseparam(struct net_device *netdev,
struct ethtool_pauseparam *pause)
{
- return;
}
static int igbvf_set_pauseparam(struct net_device *netdev,
@@ -476,5 +475,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
void igbvf_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops);
+ netdev->ethtool_ops = &igbvf_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
index dbb7dd2..b311e9e 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_ethtool.c
@@ -107,8 +107,8 @@ ixgb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
ethtool_cmd_speed_set(ecmd, SPEED_10000);
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ecmd->autoneg = AUTONEG_DISABLE;
@@ -656,5 +656,5 @@ static const struct ethtool_ops ixgb_ethtool_ops = {
void ixgb_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ixgb_ethtool_ops);
+ netdev->ethtool_ops = &ixgb_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index c6c4ca7..ac9f214 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -155,7 +155,6 @@ struct vf_data_storage {
struct vf_macvlans {
struct list_head l;
int vf;
- int rar_entry;
bool free;
bool is_macvlan;
u8 vf_macvlan[ETH_ALEN];
@@ -363,7 +362,7 @@ struct ixgbe_ring_container {
for (pos = (head).ring; pos != NULL; pos = pos->next)
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
- ? 8 : 1)
+ ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS
/* MAX_Q_VECTORS of these are allocated,
@@ -613,6 +612,15 @@ static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value)
#define MAX_MSIX_VECTORS_82598 18
#define MAX_Q_VECTORS_82598 16
+struct ixgbe_mac_addr {
+ u8 addr[ETH_ALEN];
+ u16 queue;
+ u16 state; /* bitmask */
+};
+#define IXGBE_MAC_STATE_DEFAULT 0x1
+#define IXGBE_MAC_STATE_MODIFIED 0x2
+#define IXGBE_MAC_STATE_IN_USE 0x4
+
#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599
@@ -785,6 +793,7 @@ struct ixgbe_adapter {
u32 timer_event_accumulator;
u32 vferr_refcount;
+ struct ixgbe_mac_addr *mac_table;
struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
struct hwmon_buff *ixgbe_hwmon_buff;
@@ -863,6 +872,13 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
u16 subdevice_id);
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
+#endif
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
+ u8 *addr, u16 queue);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
struct ixgbe_ring *);
@@ -941,6 +957,7 @@ static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
}
void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
+void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index 4c78ea8..1560933 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -41,10 +41,10 @@
#define IXGBE_82598_RX_PB_SIZE 512
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
+ u8 *eeprom_data);
/**
* ixgbe_set_pcie_completion_timeout - set pci-e completion timeout
@@ -140,7 +140,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
+ &ixgbe_get_phy_firmware_version_tnx;
break;
case ixgbe_phy_nl:
phy->ops.reset = &ixgbe_reset_phy_nl;
@@ -156,8 +156,8 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
/* Check to see if SFP+ module is supported */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
- &list_offset,
- &data_offset);
+ &list_offset,
+ &data_offset);
if (ret_val != 0) {
ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
goto out;
@@ -219,8 +219,8 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
* Determines the link capabilities by reading the AUTOC register.
**/
static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+ ixgbe_link_speed *speed,
+ bool *autoneg)
{
s32 status = 0;
u32 autoc = 0;
@@ -337,19 +337,25 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
int i;
bool link_up;
- /*
- * Validate the water mark configuration for packet buffer 0. Zero
- * water marks indicate that the packet buffer was not configured
- * and the watermarks for packet buffer 0 should always be configured.
- */
- if (!hw->fc.low_water ||
- !hw->fc.high_water[0] ||
- !hw->fc.pause_time) {
- hw_dbg(hw, "Invalid water mark configuration\n");
+ /* Validate the water mark configuration */
+ if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
/*
* On 82598 having Rx FC on causes resets while doing 1G
* so if it's on turn it off once we know link_speed. For
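The replacement validation above checks water marks per traffic class instead of only packet buffer 0: whenever TX pause is active and a class has a high mark, its low mark must be non-zero and strictly below the high mark, since a zero or inverted low mark causes XOFF floods. A standalone sketch of the same check:

#define MAX_TC 8

static int validate_water_marks(const unsigned int low[MAX_TC],
				const unsigned int high[MAX_TC], int tx_pause)
{
	int i;

	for (i = 0; i < MAX_TC; i++) {
		if (!tx_pause || !high[i])
			continue;	/* class not flow controlled */
		if (!low[i] || low[i] >= high[i])
			return -1;	/* invalid mark configuration */
	}
	return 0;
}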
@@ -432,12 +438,11 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl_reg);
IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), fcrth);
@@ -468,7 +473,7 @@ out:
* Restarts the link. Performs autonegotiation if needed.
**/
static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+ bool autoneg_wait_to_complete)
{
u32 autoc_reg;
u32 links_reg;
@@ -550,8 +555,8 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw)
* Reads the links register to determine if link is up and the current speed
**/
static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed, bool *link_up,
- bool link_up_wait_to_complete)
+ ixgbe_link_speed *speed, bool *link_up,
+ bool link_up_wait_to_complete)
{
u32 links_reg;
u32 i;
@@ -567,7 +572,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg);
hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD,
- &adapt_comp_reg);
+ &adapt_comp_reg);
if (link_up_wait_to_complete) {
for (i = 0; i < IXGBE_LINK_UP_TIME; i++) {
if ((link_reg & 1) &&
@@ -579,11 +584,11 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
}
msleep(100);
hw->phy.ops.read_reg(hw, 0xC79F,
- MDIO_MMD_PMAPMD,
- &link_reg);
+ MDIO_MMD_PMAPMD,
+ &link_reg);
hw->phy.ops.read_reg(hw, 0xC00C,
- MDIO_MMD_PMAPMD,
- &adapt_comp_reg);
+ MDIO_MMD_PMAPMD,
+ &adapt_comp_reg);
}
} else {
if ((link_reg & 1) && ((adapt_comp_reg & 1) == 0))
@@ -656,7 +661,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
/* Set KX4/KX support according to speed requested */
else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
- link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
+ link_mode == IXGBE_AUTOC_LMS_KX4_AN_1G_AN) {
autoc &= ~IXGBE_AUTOC_KX4_KX_SUPP_MASK;
if (speed & IXGBE_LINK_SPEED_10GB_FULL)
autoc |= IXGBE_AUTOC_KX4_SUPP;
@@ -689,14 +694,14 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
* Sets the link speed in the AUTOC register in the MAC and restarts link.
**/
static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
@@ -735,28 +740,28 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
if (analog_val & IXGBE_ATLAS_PDN_TX_REG_EN) {
/* Enable Tx Atlas so packets can be transmitted again */
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_REG_EN;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G,
- analog_val);
+ analog_val);
hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
- &analog_val);
+ &analog_val);
analog_val &= ~IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN,
- analog_val);
+ analog_val);
}
/* Reset PHY */
@@ -955,7 +960,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw)
for (vlanbyte = 0; vlanbyte < 4; vlanbyte++)
for (offset = 0; offset < hw->mac.vft_size; offset++)
IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset),
- 0);
+ 0);
return 0;
}
@@ -973,7 +978,7 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val)
u32 atlas_ctl;
IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL,
- IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
+ IXGBE_ATLASCTL_WRITE_CMD | (reg << 8));
IXGBE_WRITE_FLUSH(hw);
udelay(10);
atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
@@ -1273,8 +1278,6 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb,
/* Setup Tx packet buffer sizes */
for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++)
IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB);
-
- return;
}
static struct ixgbe_mac_operations mac_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index f32b3dd..bc7c924 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -48,17 +48,17 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
ixgbe_link_speed speed,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw);
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete);
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
@@ -96,9 +96,9 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
!ixgbe_mng_enabled(hw)) {
mac->ops.disable_tx_laser =
- &ixgbe_disable_tx_laser_multispeed_fiber;
+ &ixgbe_disable_tx_laser_multispeed_fiber;
mac->ops.enable_tx_laser =
- &ixgbe_enable_tx_laser_multispeed_fiber;
+ &ixgbe_enable_tx_laser_multispeed_fiber;
mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber;
} else {
mac->ops.disable_tx_laser = NULL;
@@ -132,13 +132,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
hw->phy.ops.reset = NULL;
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
- &data_offset);
+ &data_offset);
if (ret_val != 0)
goto setup_sfp_out;
/* PHY config will finish before releasing the semaphore */
ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ IXGBE_GSSR_MAC_CSR_SM);
if (ret_val != 0) {
ret_val = IXGBE_ERR_SWFW_SYNC;
goto setup_sfp_out;
@@ -334,7 +334,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
phy->ops.check_link = &ixgbe_check_phy_link_tnx;
phy->ops.setup_link = &ixgbe_setup_phy_link_tnx;
phy->ops.get_firmware_version =
- &ixgbe_get_phy_firmware_version_tnx;
+ &ixgbe_get_phy_firmware_version_tnx;
break;
default:
break;
@@ -352,7 +352,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
* Determines the link capabilities by reading the AUTOC register.
**/
static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
+ ixgbe_link_speed *speed,
bool *autoneg)
{
s32 status = 0;
@@ -543,7 +543,7 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw)
* Restarts the link. Performs autonegotiation if needed.
**/
static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
- bool autoneg_wait_to_complete)
+ bool autoneg_wait_to_complete)
{
u32 autoc_reg;
u32 links_reg;
@@ -672,8 +672,8 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
* Set the link speed in the AUTOC register and restarts link.
**/
static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status = 0;
ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN;
@@ -820,8 +820,8 @@ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
*/
if (speedcnt > 1)
status = ixgbe_setup_mac_link_multispeed_fiber(hw,
- highest_link_speed,
- autoneg_wait_to_complete);
+ highest_link_speed,
+ autoneg_wait_to_complete);
out:
/* Set autoneg_advertised value based on input link speed */
@@ -1009,8 +1009,8 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
if (speed & IXGBE_LINK_SPEED_1GB_FULL)
autoc |= IXGBE_AUTOC_KX_SUPP;
} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
- link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
+ (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
+ link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
/* Switch from 1G SFI to 10G SFI if requested */
if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
(pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
@@ -1018,7 +1018,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
}
} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
- (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
+ (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
/* Switch from 10G SFI to 1G SFI if requested */
if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
(pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
@@ -1051,7 +1051,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
status =
- IXGBE_ERR_AUTONEG_NOT_COMPLETE;
+ IXGBE_ERR_AUTONEG_NOT_COMPLETE;
hw_dbg(hw, "Autoneg did not complete.\n");
}
}
@@ -1074,14 +1074,14 @@ out:
* Restarts link on PHY and MAC based on settings passed in.
**/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
s32 status;
/* Setup the PHY according to input speed */
status = hw->phy.ops.setup_link_speed(hw, speed,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
/* Set up MAC */
ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);
@@ -1224,7 +1224,7 @@ mac_reset_top:
(hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
autoc2 |= (hw->mac.orig_autoc2 &
- IXGBE_AUTOC2_UPPER_MASK);
+ IXGBE_AUTOC2_UPPER_MASK);
IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
}
}
@@ -1246,7 +1246,7 @@ mac_reset_top:
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -1257,7 +1257,7 @@ mac_reset_top:
/* Store the alternative WWNN/WWPN prefix */
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
- &hw->mac.wwpn_prefix);
+ &hw->mac.wwpn_prefix);
reset_hw_out:
return status;
@@ -1271,6 +1271,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
int i;
u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+
fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;
/*
@@ -1284,8 +1285,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
udelay(10);
}
if (i >= IXGBE_FDIRCMD_CMD_POLL) {
- hw_dbg(hw, "Flow Director previous command isn't complete, "
- "aborting table re-initialization.\n");
+ hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n");
return IXGBE_ERR_FDIR_REINIT_FAILED;
}
@@ -1299,12 +1299,12 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
* - write 0 to bit 8 of FDIRCMD register
*/
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
- IXGBE_FDIRCMD_CLEARHT));
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
+ IXGBE_FDIRCMD_CLEARHT));
IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
- (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
- ~IXGBE_FDIRCMD_CLEARHT));
+ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
+ ~IXGBE_FDIRCMD_CLEARHT));
IXGBE_WRITE_FLUSH(hw);
/*
* Clear FDIR Hash register to clear any leftover hashes
@@ -1319,7 +1319,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
/* Poll init-done after we write FDIRCTRL register */
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
+ IXGBE_FDIRCTRL_INIT_DONE)
break;
usleep_range(1000, 2000);
}
@@ -1368,7 +1368,7 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
IXGBE_WRITE_FLUSH(hw);
for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
- IXGBE_FDIRCTRL_INIT_DONE)
+ IXGBE_FDIRCTRL_INIT_DONE)
break;
usleep_range(1000, 2000);
}
@@ -1453,7 +1453,7 @@ do { \
bucket_hash ^= hi_hash_dword >> n; \
else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
sig_hash ^= hi_hash_dword << (16 - n); \
-} while (0);
+} while (0)
/**
* ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
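Dropping the semicolon after while (0) in the two hash-iteration macros matters because the do/while wrapper exists to make a multi-statement macro behave as a single statement; with a ';' baked into the expansion, the caller's own ';' adds an empty statement and breaks if/else nesting. A standalone demonstration:

void step(void);

#define DO_STEP() do { step(); } while (0)	/* correct: no trailing ';' */

void demo(int cond)
{
	if (cond)
		DO_STEP();	/* caller supplies the single ';' */
	else			/* with ';' inside the macro, this 'else' */
		step();		/* would follow an empty statement and fail */
}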
@@ -1529,9 +1529,9 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
* @queue: queue index to direct traffic to
**/
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
- union ixgbe_atr_hash_dword input,
- union ixgbe_atr_hash_dword common,
- u8 queue)
+ union ixgbe_atr_hash_dword input,
+ union ixgbe_atr_hash_dword common,
+ u8 queue)
{
u64 fdirhashcmd;
u32 fdircmd;
@@ -1555,7 +1555,7 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
/* configure FDIRCMD register */
fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
- IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
+ IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
@@ -1579,7 +1579,7 @@ do { \
bucket_hash ^= lo_hash_dword >> n; \
if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
bucket_hash ^= hi_hash_dword >> n; \
-} while (0);
+} while (0)
/**
* ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash
@@ -1651,6 +1651,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
u32 mask = ntohs(input_mask->formatted.dst_port);
+
mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
mask |= ntohs(input_mask->formatted.src_port);
mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
@@ -1885,7 +1886,7 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
u32 core_ctl;
IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
- (reg << 8));
+ (reg << 8));
IXGBE_WRITE_FLUSH(hw);
udelay(10);
core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 981b8a7..4e5385a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -41,7 +41,7 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw);
static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw);
static void ixgbe_standby_eeprom(struct ixgbe_hw *hw);
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
- u16 count);
+ u16 count);
static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
@@ -271,6 +271,7 @@ out:
**/
s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
{
+ s32 ret_val;
u32 ctrl_ext;
/* Set the media type */
@@ -292,12 +293,15 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
/* Setup flow control */
- ixgbe_setup_fc(hw);
+ ret_val = ixgbe_setup_fc(hw);
+	if (ret_val)
+ goto out;
/* Clear adapter stopped flag */
hw->adapter_stopped = false;
- return 0;
+out:
+ return ret_val;
}
/**
@@ -481,7 +485,7 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
* Reads the part number string from the EEPROM.
**/
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
- u32 pba_num_size)
+ u32 pba_num_size)
{
s32 ret_val;
u16 data;
@@ -814,9 +818,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
eeprom->address_bits = 16;
else
eeprom->address_bits = 8;
- hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: "
- "%d\n", eeprom->type, eeprom->word_size,
- eeprom->address_bits);
+ hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n",
+ eeprom->type, eeprom->word_size, eeprom->address_bits);
}
return 0;
@@ -1388,8 +1391,7 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
}
if (i == timeout) {
- hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore "
- "not granted.\n");
+ hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
/*
* this release is particularly important because our attempts
* above to get the semaphore may have succeeded, and if there
@@ -1434,14 +1436,12 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
* was not granted because we don't have access to the EEPROM
*/
if (i >= timeout) {
- hw_dbg(hw, "SWESMBI Software EEPROM semaphore "
- "not granted.\n");
+ hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
ixgbe_release_eeprom_semaphore(hw);
status = IXGBE_ERR_EEPROM;
}
} else {
- hw_dbg(hw, "Software semaphore SMBI between device drivers "
- "not granted.\n");
+ hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
}
return status;
@@ -1483,7 +1483,7 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
*/
for (i = 0; i < IXGBE_EEPROM_MAX_RETRY_SPI; i += 5) {
ixgbe_shift_out_eeprom_bits(hw, IXGBE_EEPROM_RDSR_OPCODE_SPI,
- IXGBE_EEPROM_OPCODE_BITS);
+ IXGBE_EEPROM_OPCODE_BITS);
spi_stat_reg = (u8)ixgbe_shift_in_eeprom_bits(hw, 8);
if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI))
break;
@@ -1532,7 +1532,7 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw)
* @count: number of bits to shift out
**/
static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data,
- u16 count)
+ u16 count)
{
u32 eec;
u32 mask;
@@ -1736,7 +1736,7 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
* caller does not need checksum_val, the value can be NULL.
**/
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
- u16 *checksum_val)
+ u16 *checksum_val)
{
s32 status;
u16 checksum;
@@ -1809,7 +1809,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
* Puts an ethernet address into a receive address register.
**/
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr)
+ u32 enable_addr)
{
u32 rar_low, rar_high;
u32 rar_entries = hw->mac.num_rar_entries;
@@ -2053,7 +2053,7 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
if (hw->addr_ctrl.mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,
- IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
+ IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type);
hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n");
return 0;
@@ -2071,7 +2071,7 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw)
if (a->mta_in_use > 0)
IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE |
- hw->mac.mc_filter_type);
+ hw->mac.mc_filter_type);
return 0;
}
@@ -2106,19 +2106,25 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
u32 fcrtl, fcrth;
int i;
- /*
- * Validate the water mark configuration for packet buffer 0. Zero
- * water marks indicate that the packet buffer was not configured
- * and the watermarks for packet buffer 0 should always be configured.
- */
- if (!hw->fc.low_water ||
- !hw->fc.high_water[0] ||
- !hw->fc.pause_time) {
- hw_dbg(hw, "Invalid water mark configuration\n");
+ /* Validate the water mark configuration. */
+ if (!hw->fc.pause_time) {
ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
goto out;
}
+ /* Low water mark of zero causes XOFF floods */
+ for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
+ if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
+ hw->fc.high_water[i]) {
+ if (!hw->fc.low_water[i] ||
+ hw->fc.low_water[i] >= hw->fc.high_water[i]) {
+ hw_dbg(hw, "Invalid water mark configuration\n");
+ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
+ goto out;
+ }
+ }
+ }
+
/* Negotiate the fc mode to use */
ixgbe_fc_autoneg(hw);
@@ -2181,12 +2187,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
-
/* Set up and enable Rx high/low water mark thresholds, enable XON. */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if ((hw->fc.current_mode & ixgbe_fc_tx_pause) &&
hw->fc.high_water[i]) {
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
fcrth = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
} else {
@@ -2654,8 +2659,7 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw)
/* For informational purposes only */
if (i >= IXGBE_MAX_SECRX_POLL)
- hw_dbg(hw, "Rx unit being enabled before security "
- "path fully disabled. Continuing with init.\n");
+ hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n");
return 0;
@@ -2782,7 +2786,7 @@ out:
* get and set mac_addr routines.
**/
static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw,
- u16 *san_mac_offset)
+ u16 *san_mac_offset)
{
s32 ret_val;
@@ -2828,7 +2832,7 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr)
hw->mac.ops.set_lan_id(hw);
/* apply the port offset to the address offset */
(hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) :
- (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
+ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET);
for (i = 0; i < 3; i++) {
ret_val = hw->eeprom.ops.read(hw, san_mac_offset,
&san_mac_data);
@@ -3068,7 +3072,7 @@ static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan)
* Turn on/off specified VLAN in the VLAN filter table.
**/
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
- bool vlan_on)
+ bool vlan_on)
{
s32 regindex;
u32 bitindex;
@@ -3190,9 +3194,9 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind,
* Ignore it. */
vfta_changed = false;
}
- }
- else
+ } else {
IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0);
+ }
}
if (vfta_changed)
@@ -3292,7 +3296,7 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
* block to check the support for the alternative WWNN/WWPN prefix support.
**/
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix)
+ u16 *wwpn_prefix)
{
u16 offset, caps;
u16 alt_san_mac_blk_offset;
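The fc_enable hunk above replaces the old single low-water check with a per-traffic-class rule: whenever transmit pause is active and a class has a high mark, its low mark must be nonzero and strictly below the high mark, or the hardware would emit XOFF floods. A standalone sketch of that rule, with simplified types and a hypothetical MAX_TC standing in for MAX_TRAFFIC_CLASS:

	#include <stdbool.h>
	#include <stdio.h>

	#define MAX_TC 8	/* hypothetical stand-in for MAX_TRAFFIC_CLASS */

	struct fc_cfg {
		bool tx_pause;			/* this config sends pause frames */
		unsigned int high[MAX_TC];	/* KB units; 0 = class unused */
		unsigned int low[MAX_TC];
	};

	/* Every active class needs 0 < low < high, as the hunk enforces. */
	static bool fc_watermarks_valid(const struct fc_cfg *fc)
	{
		for (int i = 0; i < MAX_TC; i++) {
			if (!fc->tx_pause || !fc->high[i])
				continue;	/* class generates no pause frames */
			if (!fc->low[i] || fc->low[i] >= fc->high[i])
				return false;	/* instant XOFF, or XON never fires */
		}
		return true;
	}

	int main(void)
	{
		struct fc_cfg fc = { .tx_pause = true, .high = { 512 }, .low = { 384 } };

		printf("valid=%d\n", fc_watermarks_valid(&fc));	/* prints valid=1 */
		return 0;
	}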
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f12c40f..2ae5d4b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -39,7 +39,7 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw);
s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
- u32 pba_num_size);
+ u32 pba_num_size);
s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status);
enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status);
@@ -61,16 +61,16 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
- u16 *data);
+ u16 *data);
s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
u16 words, u16 *data);
u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
- u16 *checksum_val);
+ u16 *checksum_val);
s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
- u32 enable_addr);
+ u32 enable_addr);
s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw);
s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw,
@@ -92,13 +92,13 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq);
s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq);
s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw);
s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan,
- u32 vind, bool vlan_on);
+ u32 vind, bool vlan_on);
s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up, bool link_up_wait_to_complete);
+ ixgbe_link_speed *speed,
+ bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
- u16 *wwpn_prefix);
+ u16 *wwpn_prefix);
s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
@@ -141,8 +141,6 @@ static inline bool ixgbe_removed(void __iomem *addr)
return unlikely(!addr);
}
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg);
-
static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value)
{
u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
@@ -172,18 +170,7 @@ static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value)
}
#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value))
-static inline u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
-{
- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
- u32 value;
-
- if (ixgbe_removed(reg_addr))
- return IXGBE_FAILED_READ_REG;
- value = readl(reg_addr + reg);
- if (unlikely(value == IXGBE_FAILED_READ_REG))
- ixgbe_check_remove(hw, reg);
- return value;
-}
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg);
#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg))
#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \
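The header hunks above demote ixgbe_read_reg() from a static inline to a plain prototype and drop the exported ixgbe_check_remove() declaration: once the reader lives in one .c file, the removal check it calls can become static there. A reduced, compilable sketch of the same refactor under invented names (the u32 typedef and the all-ones sentinel are stand-ins, not the driver's definitions):

	typedef unsigned int u32;

	struct hw { volatile u32 *base; int removed; };

	/* Could not be static while a header inline still called it. */
	static void check_remove(struct hw *hw, u32 reg)
	{
		(void)reg;		/* real code probes the status register */
		hw->removed = 1;
	}

	/* Out of line; the header now carries only the prototype. */
	u32 read_reg(struct hw *hw, u32 reg)
	{
		u32 value;

		if (hw->removed)
			return ~0u;	/* stand-in for IXGBE_FAILED_READ_REG */
		value = hw->base[reg / 4];
		if (value == ~0u)	/* all ones: device may be gone */
			check_remove(hw, reg);
		return value;
	}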
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
index e055e00..a689ee0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
@@ -267,7 +267,7 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
* Configure dcb settings and enable dcb mode.
*/
s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
- struct ixgbe_dcb_config *dcb_config)
+ struct ixgbe_dcb_config *dcb_config)
{
s32 ret = 0;
u8 pfc_en;
@@ -389,7 +389,6 @@ static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map)
for (i = 0; i < MAX_USER_PRIORITY; i++)
map[i] = IXGBE_RTRUP2TC_UP_MASK &
(reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT));
- return;
}
void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
index 7a77f37..d3ba63f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c
@@ -208,7 +208,6 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg);
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
if (!(pfc_en & (1 << i))) {
@@ -217,6 +216,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en)
continue;
}
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), fcrtl);
IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), reg);
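The PFC hunk above moves the FCRTL computation inside the loop so each traffic class programs its own low_water[i]. The arithmetic is worth spelling out: the marks are kept in kilobyte units, so shifting left by 10 multiplies by 1024 to get the byte threshold the register field holds, and the top bit (IXGBE_FCRTL_XONE, assumed to be 0x80000000 here) enables XON frame generation. A small worked example with a hypothetical 960 KB mark:

	#include <stdio.h>
	#include <stdint.h>

	#define FCRTL_XONE 0x80000000u	/* assumed value of IXGBE_FCRTL_XONE */

	int main(void)
	{
		uint32_t low_water_kb = 960;	/* hypothetical per-TC mark, in KB */
		uint32_t fcrtl = (low_water_kb << 10) | FCRTL_XONE;

		/* 960 << 10 = 983040 bytes; bit 31 turns on XON generation */
		printf("FCRTL = 0x%08x, threshold = %u bytes\n",
		       fcrtl, fcrtl & ~FCRTL_XONE);
		return 0;
	}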
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
index bdb99b3..3b932fe 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c
@@ -242,7 +242,6 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
max_tc = prio_tc[i];
}
- fcrtl = (hw->fc.low_water << 10) | IXGBE_FCRTL_XONE;
/* Configure PFC Tx thresholds per TC */
for (i = 0; i <= max_tc; i++) {
@@ -257,6 +256,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
if (enabled) {
reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
+ fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
} else {
reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
index d5a1e3d..90c3702 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h
@@ -31,17 +31,17 @@
/* DCB register definitions */
#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin,
- * 1 WSP - Weighted Strict Priority
- */
+ * 1 WSP - Weighted Strict Priority
+ */
#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin,
- * 1 WRR - Weighted Round Robin
- */
+ * 1 WRR - Weighted Round Robin
+ */
#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */
#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */
#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */
#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must
- * clear!
- */
+ * clear!
+ */
#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */
/* Receive UP2TC mapping */
@@ -56,11 +56,11 @@
#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */
#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet
- * buffers enable
- */
+ * buffers enable
+ */
#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores
- * (RSS) enable
- */
+ * (RSS) enable
+ */
/* RTRPCS Bit Masks */
#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */
@@ -81,8 +81,8 @@
/* RTTPCS Bit Masks */
#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin,
- * 1 SP - Strict Priority
- */
+ * 1 SP - Strict Priority
+ */
#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */
#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */
#define IXGBE_RTTPCS_ARBD_SHIFT 22
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
index edd89a1..5172b6b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
@@ -192,8 +192,8 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
}
static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
- u8 prio, u8 bwg_id, u8 bw_pct,
- u8 up_map)
+ u8 prio, u8 bwg_id, u8 bw_pct,
+ u8 up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -210,7 +210,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
- u8 bw_pct)
+ u8 bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -218,8 +218,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
- u8 prio, u8 bwg_id, u8 bw_pct,
- u8 up_map)
+ u8 prio, u8 bwg_id, u8 bw_pct,
+ u8 up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -236,7 +236,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
- u8 bw_pct)
+ u8 bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -244,8 +244,8 @@ static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
- u8 *prio, u8 *bwg_id, u8 *bw_pct,
- u8 *up_map)
+ u8 *prio, u8 *bwg_id, u8 *bw_pct,
+ u8 *up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -256,7 +256,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
- u8 *bw_pct)
+ u8 *bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -264,8 +264,8 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
- u8 *prio, u8 *bwg_id, u8 *bw_pct,
- u8 *up_map)
+ u8 *prio, u8 *bwg_id, u8 *bw_pct,
+ u8 *up_map)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -276,7 +276,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc,
}
static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
- u8 *bw_pct)
+ u8 *bw_pct)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -284,7 +284,7 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id,
}
static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
- u8 setting)
+ u8 setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -295,7 +295,7 @@ static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority,
}
static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority,
- u8 *setting)
+ u8 *setting)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
index 472b0f4..5e2c1e3 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c
@@ -253,8 +253,7 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter)
**/
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter)
{
- if (adapter->ixgbe_dbg_adapter)
- debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
+ debugfs_remove_recursive(adapter->ixgbe_dbg_adapter);
adapter->ixgbe_dbg_adapter = NULL;
}
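The debugfs hunk drops the NULL guard because debugfs_remove_recursive() already tolerates a NULL (or error) dentry, the same convention kfree() follows. A standalone sketch of the idiom with an invented cleanup helper:

	#include <stdlib.h>

	struct dbg_state { char *buf; };

	/* NULL-safe teardown: callers never need "if (s) free_state(s)". */
	static void free_state(struct dbg_state *s)
	{
		if (!s)		/* tolerate NULL, like debugfs_remove_recursive() */
			return;
		free(s->buf);	/* free(NULL) is itself a no-op */
		free(s);
	}

	int main(void)
	{
		free_state(NULL);	/* fine: the guard lives in the callee */
		return 0;
	}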
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 6c55c14..a452730 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -141,8 +141,8 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
- IXGBE_PB_STATS_LEN + \
- IXGBE_QUEUE_STATS_LEN)
+ IXGBE_PB_STATS_LEN + \
+ IXGBE_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
"Register test (offline)", "Eeprom test (offline)",
@@ -152,7 +152,7 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
static int ixgbe_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -161,13 +161,6 @@ static int ixgbe_get_settings(struct net_device *netdev,
bool autoneg = false;
bool link_up;
- /* SFP type is needed for get_link_capabilities */
- if (hw->phy.media_type & (ixgbe_media_type_fiber |
- ixgbe_media_type_fiber_qsfp)) {
- if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
- hw->phy.ops.identify_sfp(hw);
- }
-
hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);
/* set the supported link speeds */
@@ -303,15 +296,15 @@ static int ixgbe_get_settings(struct net_device *netdev,
}
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
return 0;
}
static int ixgbe_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+ struct ethtool_cmd *ecmd)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -368,7 +361,7 @@ static int ixgbe_set_settings(struct net_device *netdev,
}
static void ixgbe_get_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -390,7 +383,7 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
- struct ethtool_pauseparam *pause)
+ struct ethtool_pauseparam *pause)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -450,7 +443,7 @@ static int ixgbe_get_regs_len(struct net_device *netdev)
#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
static void ixgbe_get_regs(struct net_device *netdev,
- struct ethtool_regs *regs, void *p)
+ struct ethtool_regs *regs, void *p)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -812,7 +805,7 @@ static int ixgbe_get_eeprom_len(struct net_device *netdev)
}
static int ixgbe_get_eeprom(struct net_device *netdev,
- struct ethtool_eeprom *eeprom, u8 *bytes)
+ struct ethtool_eeprom *eeprom, u8 *bytes)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
@@ -918,7 +911,7 @@ err:
}
static void ixgbe_get_drvinfo(struct net_device *netdev,
- struct ethtool_drvinfo *drvinfo)
+ struct ethtool_drvinfo *drvinfo)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
u32 nvm_track_id;
@@ -940,7 +933,7 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
}
static void ixgbe_get_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
@@ -953,7 +946,7 @@ static void ixgbe_get_ringparam(struct net_device *netdev,
}
static int ixgbe_set_ringparam(struct net_device *netdev,
- struct ethtool_ringparam *ring)
+ struct ethtool_ringparam *ring)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_ring *temp_ring;
@@ -1082,7 +1075,7 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
}
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
- struct ethtool_stats *stats, u64 *data)
+ struct ethtool_stats *stats, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct rtnl_link_stats64 temp;
@@ -1110,7 +1103,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
- sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+ sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
}
for (j = 0; j < netdev->num_tx_queues; j++) {
ring = adapter->tx_ring[j];
@@ -1180,7 +1173,7 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
- u8 *data)
+ u8 *data)
{
char *p = (char *)data;
int i;
@@ -1357,8 +1350,7 @@ static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
val = ixgbe_read_reg(&adapter->hw, reg);
if (val != (test_pattern[pat] & write & mask)) {
- e_err(drv, "pattern test reg %04X failed: got "
- "0x%08X expected 0x%08X\n",
+ e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
reg, val, (test_pattern[pat] & write & mask));
*data = reg;
ixgbe_write_reg(&adapter->hw, reg, before);
@@ -1382,8 +1374,8 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
ixgbe_write_reg(&adapter->hw, reg, write & mask);
val = ixgbe_read_reg(&adapter->hw, reg);
if ((write & mask) != (val & mask)) {
- e_err(drv, "set/check reg %04X test failed: got 0x%08X "
- "expected 0x%08X\n", reg, (val & mask), (write & mask));
+ e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
+ reg, (val & mask), (write & mask));
*data = reg;
ixgbe_write_reg(&adapter->hw, reg, before);
return true;
@@ -1430,8 +1422,8 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
if (value != after) {
- e_err(drv, "failed STATUS register test got: 0x%08X "
- "expected: 0x%08X\n", after, value);
+ e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
+ after, value);
*data = 1;
return 1;
}
@@ -1533,10 +1525,10 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
return -1;
}
} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
shared_int = false;
} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
- netdev->name, netdev)) {
+ netdev->name, netdev)) {
*data = 1;
return -1;
}
@@ -1563,9 +1555,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
*/
adapter->test_icr = 0;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
@@ -1587,7 +1579,7 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
- if (!(adapter->test_icr &mask)) {
+ if (!(adapter->test_icr & mask)) {
*data = 4;
break;
}
@@ -1602,9 +1594,9 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
*/
adapter->test_icr = 0;
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
- ~mask & 0x00007FFF);
+ ~mask & 0x00007FFF);
IXGBE_WRITE_FLUSH(&adapter->hw);
usleep_range(10000, 20000);
@@ -1964,7 +1956,7 @@ out:
}
static void ixgbe_diag_test(struct net_device *netdev,
- struct ethtool_test *eth_test, u64 *data)
+ struct ethtool_test *eth_test, u64 *data)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
bool if_running = netif_running(netdev);
@@ -1987,10 +1979,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
int i;
for (i = 0; i < adapter->num_vfs; i++) {
if (adapter->vfinfo[i].clear_to_send) {
- netdev_warn(netdev, "%s",
- "offline diagnostic is not "
- "supported when VFs are "
- "present\n");
+ netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
data[0] = 1;
data[1] = 1;
data[2] = 1;
@@ -2037,8 +2026,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
* loopback diagnostic. */
if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
IXGBE_FLAG_VMDQ_ENABLED)) {
- e_info(hw, "Skip MAC loopback diagnostic in VT "
- "mode\n");
+ e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
data[3] = 0;
goto skip_loopback;
}
@@ -2078,7 +2066,7 @@ skip_ol_tests:
}
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
- struct ethtool_wolinfo *wol)
+ struct ethtool_wolinfo *wol)
{
struct ixgbe_hw *hw = &adapter->hw;
int retval = 0;
@@ -2094,12 +2082,12 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
}
static void ixgbe_get_wol(struct net_device *netdev,
- struct ethtool_wolinfo *wol)
+ struct ethtool_wolinfo *wol)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
wol->supported = WAKE_UCAST | WAKE_MCAST |
- WAKE_BCAST | WAKE_MAGIC;
+ WAKE_BCAST | WAKE_MAGIC;
wol->wolopts = 0;
if (ixgbe_wol_exclusion(adapter, wol) ||
@@ -2181,7 +2169,7 @@ static int ixgbe_set_phys_id(struct net_device *netdev,
}
static int ixgbe_get_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
@@ -2222,8 +2210,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
- e_info(probe, "rx-usecs value high enough "
- "to re-enable RSC\n");
+ e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
return true;
}
/* if interrupt rate is too high then disable RSC */
@@ -2236,7 +2223,7 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
}
static int ixgbe_set_coalesce(struct net_device *netdev,
- struct ethtool_coalesce *ec)
+ struct ethtool_coalesce *ec)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_q_vector *q_vector;
@@ -2421,9 +2408,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
switch (cmd->flow_type) {
case TCP_V4_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case UDP_V4_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case SCTP_V4_FLOW:
case AH_ESP_V4_FLOW:
case AH_V4_FLOW:
@@ -2433,9 +2422,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
break;
case TCP_V6_FLOW:
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case UDP_V6_FLOW:
if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
case SCTP_V6_FLOW:
case AH_ESP_V6_FLOW:
case AH_V6_FLOW:
@@ -2787,8 +2778,7 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
if ((flags2 & UDP_RSS_FLAGS) &&
!(adapter->flags2 & UDP_RSS_FLAGS))
- e_warn(drv, "enabling UDP RSS: fragmented packets"
- " may arrive out of order to the stack above\n");
+ e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");
adapter->flags2 = flags2;
@@ -3099,5 +3089,5 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
+ netdev->ethtool_ops = &ixgbe_ethtool_ops;
}
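Two idioms are visible in the ethtool hunks above: SET_ETHTOOL_OPS() is gone in favor of assigning netdev->ethtool_ops directly, and the RSS hash-option switch gains /* fallthrough */ comments because each case deliberately accumulates flags on its way into the shared tail. A standalone sketch of that accumulation pattern, with invented flow and flag names:

	#include <stdio.h>

	enum flow { FLOW_TCP, FLOW_UDP, FLOW_OTHER };
	enum { HASH_IP = 0x1, HASH_L4 = 0x2 };	/* hypothetical flag bits */

	static unsigned int hash_fields(enum flow f, int udp_l4_enabled)
	{
		unsigned int data = 0;

		switch (f) {
		case FLOW_TCP:		/* TCP always hashes on L4 ports */
			data |= HASH_L4;
			/* fallthrough */
		case FLOW_UDP:		/* UDP ports only when opted in */
			if (udp_l4_enabled)
				data |= HASH_L4;
			/* fallthrough */
		default:		/* every IP flow hashes on addresses */
			data |= HASH_IP;
			break;
		}
		return data;
	}

	int main(void)
	{
		printf("tcp=%u udp=%u other=%u\n",
		       hash_fields(FLOW_TCP, 0), hash_fields(FLOW_UDP, 1),
		       hash_fields(FLOW_OTHER, 0));
		return 0;
	}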
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
index b16cc78..0772b77 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h
@@ -81,9 +81,7 @@ struct ixgbe_fcoe {
void *extra_ddp_buffer;
dma_addr_t extra_ddp_buffer_dma;
unsigned long mode;
-#ifdef CONFIG_IXGBE_DCB
u8 up;
-#endif
};
#endif /* _IXGBE_FCOE_H */
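Removing the CONFIG_IXGBE_DCB guard around `up` means the field always exists, so readers such as the reworked ixgbe_lpbthresh() need no matching #ifdef. A sketch of the trade-off under a hypothetical option name:

	/* Before: a conditional member forces every reader to carry an #ifdef. */
	struct fcoe_cfg_old {
	#ifdef CONFIG_SOME_OPTION	/* hypothetical option name */
		unsigned char up;
	#endif
		unsigned long mode;
	};

	/* After: the field always exists and reads as 0 when nothing sets it,
	 * so only the code that assigns it needs to stay behind the option. */
	struct fcoe_cfg_new {
		unsigned char up;
		unsigned long mode;
	};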
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 2067d39..2d9451e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -1113,8 +1113,8 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
err = pci_enable_msi(adapter->pdev);
if (err) {
netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev,
- "Unable to allocate MSI interrupt, "
- "falling back to legacy. Error: %d\n", err);
+ "Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n",
+ err);
return;
}
adapter->flags |= IXGBE_FLAG_MSI_ENABLED;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index c047c3e..f5aa331 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -301,7 +301,7 @@ static void ixgbe_remove_adapter(struct ixgbe_hw *hw)
ixgbe_service_event_schedule(adapter);
}
-void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
+static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
{
u32 value;
@@ -320,6 +320,32 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
ixgbe_remove_adapter(hw);
}
+/**
+ * ixgbe_read_reg - Read from device register
+ * @hw: hw specific details
+ * @reg: offset of register to read
+ *
+ * Returns: value read or IXGBE_FAILED_READ_REG if removed
+ *
+ * This function is used to read device registers. When a read returns
+ * all ones, it confirms device removal by also checking the status
+ * register for all ones. If a removal was detected previously, it
+ * avoids touching the hardware and returns IXGBE_FAILED_READ_REG
+ * (all ones) directly.
+ */
+u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg)
+{
+ u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr);
+ u32 value;
+
+ if (ixgbe_removed(reg_addr))
+ return IXGBE_FAILED_READ_REG;
+ value = readl(reg_addr + reg);
+ if (unlikely(value == IXGBE_FAILED_READ_REG))
+ ixgbe_check_remove(hw, reg);
+ return value;
+}
+
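With ixgbe_read_reg() out of line, every register read funnels through one surprise-removal check, and callers receive the all-ones sentinel instead of faulting. A hedged sketch of a caller built on that contract - the helper, polling bound and ready bit are invented; only ixgbe_read_reg(), IXGBE_STATUS and IXGBE_FAILED_READ_REG come from the driver:

	/* Hypothetical caller: poll a ready bit, but stop if the adapter was
	 * hot-unplugged (every read then returns the all-ones sentinel). */
	static int example_wait_for_ready(struct ixgbe_hw *hw)
	{
		int i;

		for (i = 0; i < 100; i++) {	/* invented polling bound */
			u32 v = ixgbe_read_reg(hw, IXGBE_STATUS);

			if (v == IXGBE_FAILED_READ_REG)
				return -ENODEV;	/* device gone, stop polling */
			if (v & 0x1)		/* invented ready bit */
				return 0;
			usleep_range(1000, 2000);
		}
		return -ETIMEDOUT;
	}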
static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
{
u16 value;
@@ -3743,35 +3769,6 @@ static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev,
}
/**
- * ixgbe_vlan_filter_disable - helper to disable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_disable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
-
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
- * ixgbe_vlan_filter_enable - helper to enable hw vlan filtering
- * @adapter: driver data
- */
-static void ixgbe_vlan_filter_enable(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vlnctrl;
-
- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
- vlnctrl |= IXGBE_VLNCTRL_VFE;
- vlnctrl &= ~IXGBE_VLNCTRL_CFIEN;
- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
-}
-
-/**
* ixgbe_vlan_strip_disable - helper to disable hw vlan stripping
* @adapter: driver data
*/
@@ -3850,6 +3847,158 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
}
/**
+ * ixgbe_write_mc_addr_list - write multicast addresses to MTA
+ * @netdev: network interface device structure
+ *
+ * Writes multicast address list to the MTA hash table.
+ * Returns: -ENOMEM on failure
+ * 0 on no addresses written
+ * X on writing X addresses to MTA
+ **/
+static int ixgbe_write_mc_addr_list(struct net_device *netdev)
+{
+ struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (!netif_running(netdev))
+ return 0;
+
+ if (hw->mac.ops.update_mc_addr_list)
+ hw->mac.ops.update_mc_addr_list(hw, netdev);
+ else
+ return -ENOMEM;
+
+#ifdef CONFIG_PCI_IOV
+ ixgbe_restore_vf_multicasts(adapter);
+#endif
+
+ return netdev_mc_count(netdev);
+}
+
+#ifdef CONFIG_PCI_IOV
+void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ else
+ hw->mac.ops.clear_rar(hw, i);
+
+ adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED);
+ }
+}
+#endif
+
+static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) {
+ if (adapter->mac_table[i].state &
+ IXGBE_MAC_STATE_IN_USE)
+ hw->mac.ops.set_rar(hw, i,
+ adapter->mac_table[i].addr,
+ adapter->mac_table[i].queue,
+ IXGBE_RAH_AV);
+ else
+ hw->mac.ops.clear_rar(hw, i);
+
+ adapter->mac_table[i].state &=
+ ~(IXGBE_MAC_STATE_MODIFIED);
+ }
+ }
+}
+
+static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter)
+{
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ }
+ ixgbe_sync_mac_table(adapter);
+}
+
+static int ixgbe_available_rars(struct ixgbe_adapter *adapter)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i, count = 0;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state == 0)
+ count++;
+ }
+ return count;
+}
+
+/* this function overwrites the first RAR entry with the default filter */
+static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter,
+ u8 *addr)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN);
+ adapter->mac_table[0].queue = VMDQ_P(0);
+ adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT |
+ IXGBE_MAC_STATE_IN_USE);
+ hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr,
+ adapter->mac_table[0].queue,
+ IXGBE_RAH_AV);
+}
+
+int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+ struct ixgbe_hw *hw = &adapter->hw;
+ int i;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE)
+ continue;
+ adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED |
+ IXGBE_MAC_STATE_IN_USE);
+ ether_addr_copy(adapter->mac_table[i].addr, addr);
+ adapter->mac_table[i].queue = queue;
+ ixgbe_sync_mac_table(adapter);
+ return i;
+ }
+ return -ENOMEM;
+}
+
+int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue)
+{
+ /* search table for addr, if found, set to 0 and sync */
+ int i;
+ struct ixgbe_hw *hw = &adapter->hw;
+
+ if (is_zero_ether_addr(addr))
+ return -EINVAL;
+
+ for (i = 0; i < hw->mac.num_rar_entries; i++) {
+ if (ether_addr_equal(addr, adapter->mac_table[i].addr) &&
+ adapter->mac_table[i].queue == queue) {
+ adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED;
+ adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE;
+ memset(adapter->mac_table[i].addr, 0, ETH_ALEN);
+ adapter->mac_table[i].queue = 0;
+ ixgbe_sync_mac_table(adapter);
+ return 0;
+ }
+ }
+ return -ENOMEM;
+}
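The new software MAC table centralizes RAR bookkeeping: ixgbe_add_mac_filter() claims the first free slot and returns its index (or -EINVAL/-ENOMEM), ixgbe_del_mac_filter() clears the matching (address, queue) pair, and both push only MODIFIED entries to hardware via ixgbe_sync_mac_table(). A hedged usage sketch - the caller, address and pool number are invented:

	/* Hypothetical caller, e.g. wiring a filter for an extra VMDq pool. */
	static int example_attach_filter(struct ixgbe_adapter *adapter)
	{
		u8 addr[ETH_ALEN] = { 0x02, 0x00, 0x00, 0xaa, 0xbb, 0xcc };
		int idx;

		idx = ixgbe_add_mac_filter(adapter, addr, 3);	/* invented pool 3 */
		if (idx < 0)
			return idx;	/* -EINVAL (zero addr) or -ENOMEM (full) */

		/* ... the RAR at index idx is already synced to hardware ... */

		return ixgbe_del_mac_filter(adapter, addr, 3);	/* frees the slot */
	}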
+/**
* ixgbe_write_uc_addr_list - write unicast addresses to RAR table
* @netdev: network interface device structure
*
@@ -3858,39 +4007,23 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter)
* 0 on no addresses written
* X on writing X addresses to the RAR table
**/
-static int ixgbe_write_uc_addr_list(struct net_device *netdev)
+static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
- struct ixgbe_hw *hw = &adapter->hw;
- unsigned int rar_entries = hw->mac.num_rar_entries - 1;
int count = 0;
- /* In SR-IOV/VMDQ modes significantly less RAR entries are available */
- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
- rar_entries = IXGBE_MAX_PF_MACVLANS - 1;
-
/* return ENOMEM indicating insufficient memory for addresses */
- if (netdev_uc_count(netdev) > rar_entries)
+ if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter))
return -ENOMEM;
if (!netdev_uc_empty(netdev)) {
struct netdev_hw_addr *ha;
- /* return error if we do not support writing to RAR table */
- if (!hw->mac.ops.set_rar)
- return -ENOMEM;
-
netdev_for_each_uc_addr(ha, netdev) {
- if (!rar_entries)
- break;
- hw->mac.ops.set_rar(hw, rar_entries--, ha->addr,
- VMDQ_P(0), IXGBE_RAH_AV);
+ ixgbe_del_mac_filter(adapter, ha->addr, vfn);
+ ixgbe_add_mac_filter(adapter, ha->addr, vfn);
count++;
}
}
- /* write the addresses in reverse order to avoid write combining */
- for (; rar_entries > 0 ; rar_entries--)
- hw->mac.ops.clear_rar(hw, rar_entries);
-
return count;
}
@@ -3908,11 +4041,12 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE;
+ u32 vlnctrl;
int count;
/* Check for Promiscuous and All Multicast modes */
-
fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
+ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
/* set all bits that we expect to always be set */
fctrl &= ~IXGBE_FCTRL_SBP; /* disable store-bad-packets */
@@ -3922,26 +4056,24 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* clear the bits we are changing the status of */
fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
-
+ vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
if (netdev->flags & IFF_PROMISC) {
hw->addr_ctrl.user_set_promisc = true;
fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE);
- vmolr |= (IXGBE_VMOLR_ROPE | IXGBE_VMOLR_MPE);
+ vmolr |= IXGBE_VMOLR_MPE;
/* Only disable hardware filter vlans in promiscuous mode
* if SR-IOV and VMDQ are disabled - otherwise ensure
* that hardware VLAN filters remain enabled.
*/
-		if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
-					IXGBE_FLAG_SRIOV_ENABLED)))
-			ixgbe_vlan_filter_disable(adapter);
-		else
-			ixgbe_vlan_filter_enable(adapter);
+		if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
+				      IXGBE_FLAG_SRIOV_ENABLED))
+			vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
} else {
if (netdev->flags & IFF_ALLMULTI) {
fctrl |= IXGBE_FCTRL_MPE;
vmolr |= IXGBE_VMOLR_MPE;
}
- ixgbe_vlan_filter_enable(adapter);
+ vlnctrl |= IXGBE_VLNCTRL_VFE;
hw->addr_ctrl.user_set_promisc = false;
}
@@ -3950,7 +4082,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
* sufficient space to store all the addresses then enable
* unicast promiscuous mode
*/
- count = ixgbe_write_uc_addr_list(netdev);
+ count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0));
if (count < 0) {
fctrl |= IXGBE_FCTRL_UPE;
vmolr |= IXGBE_VMOLR_ROPE;
@@ -3960,11 +4092,13 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
* then we should just turn on promiscuous mode so
* that we can at least receive multicast traffic
*/
- hw->mac.ops.update_mc_addr_list(hw, netdev);
- vmolr |= IXGBE_VMOLR_ROMPE;
-
- if (adapter->num_vfs)
- ixgbe_restore_vf_multicasts(adapter);
+ count = ixgbe_write_mc_addr_list(netdev);
+ if (count < 0) {
+ fctrl |= IXGBE_FCTRL_MPE;
+ vmolr |= IXGBE_VMOLR_MPE;
+ } else if (count) {
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ }
if (hw->mac.type != ixgbe_mac_82598EB) {
vmolr |= IXGBE_READ_REG(hw, IXGBE_VMOLR(VMDQ_P(0))) &
@@ -3985,6 +4119,7 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
/* NOTE: VLAN filtering is disabled by setting PROMISC */
}
+ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
@@ -4101,8 +4236,8 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
(tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
(pb == ixgbe_fcoe_get_tc(adapter)))
tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
-
#endif
+
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -4143,7 +4278,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb)
* @adapter: board private structure to calculate for
* @pb: packet buffer to calculate
*/
-static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
+static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb)
{
struct ixgbe_hw *hw = &adapter->hw;
struct net_device *dev = adapter->netdev;
@@ -4153,6 +4288,14 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter)
/* Calculate max LAN frame size */
tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
+#ifdef IXGBE_FCOE
+ /* FCoE traffic class uses FCOE jumbo frames */
+ if ((dev->features & NETIF_F_FCOE_MTU) &&
+ (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) &&
+ (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up)))
+ tc = IXGBE_FCOE_JUMBO_FRAME_SIZE;
+#endif
+
/* Calculate delay value for device */
switch (hw->mac.type) {
case ixgbe_mac_X540:
@@ -4179,15 +4322,17 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter)
if (!num_tc)
num_tc = 1;
- hw->fc.low_water = ixgbe_lpbthresh(adapter);
-
for (i = 0; i < num_tc; i++) {
hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i);
+ hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i);
/* Low water marks must not be larger than high water marks */
- if (hw->fc.low_water > hw->fc.high_water[i])
- hw->fc.low_water = 0;
+ if (hw->fc.low_water[i] > hw->fc.high_water[i])
+ hw->fc.low_water[i] = 0;
}
+
+ for (; i < MAX_TRAFFIC_CLASS; i++)
+ hw->fc.high_water[i] = 0;
}
static void ixgbe_configure_pb(struct ixgbe_adapter *adapter)
@@ -4249,20 +4394,10 @@ static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
vmolr |= IXGBE_VMOLR_ROMPE;
hw->mac.ops.update_mc_addr_list(hw, dev);
}
- ixgbe_write_uc_addr_list(adapter->netdev);
+ ixgbe_write_uc_addr_list(adapter->netdev, pool);
IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
}
-static void ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
- u8 *addr, u16 pool)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- unsigned int entry;
-
- entry = hw->mac.num_rar_entries - pool;
- hw->mac.ops.set_rar(hw, entry, addr, VMDQ_P(pool), IXGBE_RAH_AV);
-}
-
static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter)
{
struct ixgbe_adapter *adapter = vadapter->real_adapter;
@@ -4521,6 +4656,8 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw)
case ixgbe_phy_qsfp_active_unknown:
case ixgbe_phy_qsfp_intel:
case ixgbe_phy_qsfp_unknown:
+ /* ixgbe_phy_none is set when no SFP module is present */
+ case ixgbe_phy_none:
return true;
case ixgbe_phy_nl:
if (hw->mac.type == ixgbe_mac_82598EB)
@@ -4742,7 +4879,9 @@ void ixgbe_up(struct ixgbe_adapter *adapter)
void ixgbe_reset(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
+ struct net_device *netdev = adapter->netdev;
int err;
+ u8 old_addr[ETH_ALEN];
if (ixgbe_removed(hw->hw_addr))
return;
@@ -4778,9 +4917,10 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
}
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
-
- /* reprogram the RAR[0] in case user changed it. */
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
+ /* do not flush user set addresses */
+ memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len);
+ ixgbe_flush_sw_mac_table(adapter);
+ ixgbe_mac_set_default_filter(adapter, old_addr);
/* update SAN MAC vmdq pool selection */
if (hw->mac.san_mac_rar_index)
@@ -5026,6 +5166,10 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter)
#endif /* CONFIG_IXGBE_DCB */
#endif /* IXGBE_FCOE */
+ adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) *
+ hw->mac.num_rar_entries,
+ GFP_ATOMIC);
+
/* Set MAC specific capability flags and exceptions */
switch (hw->mac.type) {
case ixgbe_mac_82598EB:
@@ -5517,6 +5661,17 @@ err_setup_tx:
return err;
}
+static void ixgbe_close_suspend(struct ixgbe_adapter *adapter)
+{
+ ixgbe_ptp_suspend(adapter);
+
+ ixgbe_down(adapter);
+ ixgbe_free_irq(adapter);
+
+ ixgbe_free_all_tx_resources(adapter);
+ ixgbe_free_all_rx_resources(adapter);
+}
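ixgbe_close_suspend() exists so ixgbe_close() and __ixgbe_shutdown() stop duplicating the quiesce sequence; only close-specific work (fdir filter teardown, releasing hw control) stays with the caller. A standalone sketch of the shared-teardown shape, with invented device state:

	#include <stdbool.h>
	#include <stdio.h>

	struct dev_priv { bool running; int irqs, rings; };

	static void release_irqs(struct dev_priv *p)  { p->irqs = 0; }
	static void release_rings(struct dev_priv *p) { p->rings = 0; }
	static void flush_filters(struct dev_priv *p) { (void)p; }

	/* One shared quiesce path: close() and suspend() can no longer drift. */
	static void dev_quiesce(struct dev_priv *p)
	{
		p->running = false;
		release_irqs(p);
		release_rings(p);
	}

	static int dev_close(struct dev_priv *p)
	{
		dev_quiesce(p);
		flush_filters(p);	/* close-only work stays in the caller */
		return 0;
	}

	static int dev_suspend(struct dev_priv *p)
	{
		if (p->running)		/* suspend may find the device down */
			dev_quiesce(p);
		return 0;
	}

	int main(void)
	{
		struct dev_priv p = { .running = true, .irqs = 4, .rings = 8 };

		dev_suspend(&p);
		dev_close(&p);
		printf("running=%d irqs=%d rings=%d\n", p.running, p.irqs, p.rings);
		return 0;
	}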
+
/**
* ixgbe_close - Disables a network interface
* @netdev: network interface device structure
@@ -5534,14 +5689,10 @@ static int ixgbe_close(struct net_device *netdev)
ixgbe_ptp_stop(adapter);
- ixgbe_down(adapter);
- ixgbe_free_irq(adapter);
+ ixgbe_close_suspend(adapter);
ixgbe_fdir_filter_exit(adapter);
- ixgbe_free_all_tx_resources(adapter);
- ixgbe_free_all_rx_resources(adapter);
-
ixgbe_release_hw_control(adapter);
return 0;
@@ -5608,12 +5759,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
netif_device_detach(netdev);
rtnl_lock();
- if (netif_running(netdev)) {
- ixgbe_down(adapter);
- ixgbe_free_irq(adapter);
- ixgbe_free_all_tx_resources(adapter);
- ixgbe_free_all_rx_resources(adapter);
- }
+ if (netif_running(netdev))
+ ixgbe_close_suspend(adapter);
rtnl_unlock();
ixgbe_clear_interrupt_scheme(adapter);
@@ -5945,7 +6092,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter)
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
set_bit(__IXGBE_TX_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->state));
+ &(adapter->tx_ring[i]->state));
/* re-enable flow director interrupts */
IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_FLOW_DIR);
} else {
@@ -7172,16 +7319,17 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p)
struct ixgbe_adapter *adapter = netdev_priv(netdev);
struct ixgbe_hw *hw = &adapter->hw;
struct sockaddr *addr = p;
+ int ret;
if (!is_valid_ether_addr(addr->sa_data))
return -EADDRNOTAVAIL;
+ ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
- hw->mac.ops.set_rar(hw, 0, hw->mac.addr, VMDQ_P(0), IXGBE_RAH_AV);
-
- return 0;
+ ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0));
+ return ret > 0 ? 0 : ret;
}
static int
@@ -7783,7 +7931,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
.ndo_do_ioctl = ixgbe_ioctl,
.ndo_set_vf_mac = ixgbe_ndo_set_vf_mac,
.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan,
- .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw,
+ .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw,
.ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk,
.ndo_get_vf_config = ixgbe_ndo_get_vf_config,
.ndo_get_stats64 = ixgbe_get_stats64,
@@ -8187,6 +8335,8 @@ skip_sriov:
goto err_sw_init;
}
+ ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr);
+
setup_timer(&adapter->service_timer, &ixgbe_service_timer,
(unsigned long) adapter);
@@ -8242,7 +8392,7 @@ skip_sriov:
if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, hw->phy.sfp_type,
- part_str);
+ part_str);
else
e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
hw->mac.type, hw->phy.type, part_str);
@@ -8304,8 +8454,8 @@ skip_sriov:
ixgbe_dbg_adapter_init(adapter);
- /* Need link setup for MNG FW, else wait for IXGBE_UP */
- if (ixgbe_mng_enabled(hw) && hw->mac.ops.setup_link)
+ /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */
+ if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link)
hw->mac.ops.setup_link(hw,
IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL,
true);
@@ -8319,6 +8469,7 @@ err_sw_init:
ixgbe_disable_sriov(adapter);
adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP;
iounmap(adapter->io_addr);
+ kfree(adapter->mac_table);
err_ioremap:
free_netdev(netdev);
err_alloc_etherdev:
@@ -8392,6 +8543,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
e_dev_info("complete\n");
+ kfree(adapter->mac_table);
free_netdev(netdev);
pci_disable_pcie_error_reporting(pdev);
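One rename in the netdev ops table deserves a note: .ndo_set_vf_tx_rate became .ndo_set_vf_rate when the core grew min/max VF rate limiting in this cycle. To the best of my recollection the new hook takes (dev, vf, min_tx_rate, max_tx_rate); treat the signature as an assumption to verify against include/linux/netdevice.h. A stub in that shape, mirroring how ixgbe rejects a minimum rate it cannot enforce:

	/* Sketch only: signature assumed from the 3.16 min/max VF rate work. */
	static int example_ndo_set_vf_rate(struct net_device *dev, int vf,
					   int min_tx_rate, int max_tx_rate)
	{
		if (min_tx_rate)	/* this hardware only caps, no floor */
			return -EINVAL;

		/* ... program max_tx_rate (Mb/s) for this VF ... */
		return 0;
	}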
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
index f5c6af2..1918e0a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
@@ -223,7 +223,7 @@ out:
* received an ack to that message within delay * timeout period
**/
static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 mbx_id)
+ u16 mbx_id)
{
struct ixgbe_mbx_info *mbx = &hw->mbx;
s32 ret_val = IXGBE_ERR_MBX;
@@ -269,7 +269,7 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
- index)) {
+ index)) {
ret_val = 0;
hw->mbx.stats.reqs++;
}
@@ -291,7 +291,7 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
u32 vf_bit = vf_number % 16;
if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
- index)) {
+ index)) {
ret_val = 0;
hw->mbx.stats.acks++;
}
@@ -366,7 +366,7 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
* returns SUCCESS if it successfully copied message into the buffer
**/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_number)
{
s32 ret_val;
u16 i;
@@ -407,7 +407,7 @@ out_no_write:
* a message due to a VF request so no polling for message is needed.
**/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
- u16 vf_number)
+ u16 vf_number)
{
s32 ret_val;
u16 i;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
index a9b9ad6..a5cb755 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h
@@ -54,11 +54,11 @@
* Message ACK's are the value or'd with 0xF0000000
*/
#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with
- * this are the ACK */
+ * this are the ACK */
#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with
- * this are the NACK */
+ * this are the NACK */
#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still
- clear to send requests */
+ clear to send requests */
#define IXGBE_VT_MSGINFO_SHIFT 16
/* bits 23:16 are used for extra info for certain messages */
#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index a76af8e2..ff68b7a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -67,7 +67,7 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) {
ixgbe_get_phy_id(hw);
hw->phy.type =
- ixgbe_get_phy_type_from_id(hw->phy.id);
+ ixgbe_get_phy_type_from_id(hw->phy.id);
if (hw->phy.type == ixgbe_phy_unknown) {
hw->phy.ops.read_reg(hw,
@@ -136,12 +136,12 @@ static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw)
u16 phy_id_low = 0;
status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD,
- &phy_id_high);
+ &phy_id_high);
if (status == 0) {
hw->phy.id = (u32)(phy_id_high << 16);
status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD,
- &phy_id_low);
+ &phy_id_low);
hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK);
hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK);
}
@@ -318,7 +318,7 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type,
* @phy_data: Pointer to read data from PHY register
**/
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data)
+ u32 device_type, u16 *phy_data)
{
s32 status;
u16 gssr;
@@ -421,7 +421,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
* @phy_data: Data to write to the PHY register
**/
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data)
+ u32 device_type, u16 phy_data)
{
s32 status;
u16 gssr;
@@ -548,8 +548,8 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
* @speed: new link speed
**/
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete)
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete)
{
/*
@@ -582,8 +582,8 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
* Determines the link capabilities by reading the AUTOC register.
*/
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg)
+ ixgbe_link_speed *speed,
+ bool *autoneg)
{
s32 status = IXGBE_ERR_LINK_SETUP;
u16 speed_ability;
@@ -592,7 +592,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
*autoneg = true;
status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
- &speed_ability);
+ &speed_ability);
if (status == 0) {
if (speed_ability & MDIO_SPEED_10G)
@@ -806,11 +806,11 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
/* reset the PHY and poll for completion */
hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
- (phy_data | MDIO_CTRL1_RESET));
+ (phy_data | MDIO_CTRL1_RESET));
for (i = 0; i < 100; i++) {
hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS,
- &phy_data);
+ &phy_data);
if ((phy_data & MDIO_CTRL1_RESET) == 0)
break;
usleep_range(10000, 20000);
@@ -824,7 +824,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
/* Get init offsets */
ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
- &data_offset);
+ &data_offset);
if (ret_val != 0)
goto out;
@@ -838,7 +838,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
if (ret_val)
goto err_eeprom;
control = (eword & IXGBE_CONTROL_MASK_NL) >>
- IXGBE_CONTROL_SHIFT_NL;
+ IXGBE_CONTROL_SHIFT_NL;
edata = eword & IXGBE_DATA_MASK_NL;
switch (control) {
case IXGBE_DELAY_NL:
@@ -859,7 +859,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
if (ret_val)
goto err_eeprom;
hw->phy.ops.write_reg(hw, phy_offset,
- MDIO_MMD_PMAPMD, eword);
+ MDIO_MMD_PMAPMD, eword);
hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword,
phy_offset);
data_offset++;
@@ -1010,10 +1010,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
- ixgbe_sfp_type_da_cu_core0;
+ ixgbe_sfp_type_da_cu_core0;
else
hw->phy.sfp_type =
- ixgbe_sfp_type_da_cu_core1;
+ ixgbe_sfp_type_da_cu_core1;
} else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
hw->phy.ops.read_i2c_eeprom(
hw, IXGBE_SFF_CABLE_SPEC_COMP,
@@ -1035,10 +1035,10 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
IXGBE_SFF_10GBASELR_CAPABLE)) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core0;
+ ixgbe_sfp_type_srlr_core0;
else
hw->phy.sfp_type =
- ixgbe_sfp_type_srlr_core1;
+ ixgbe_sfp_type_srlr_core1;
} else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
if (hw->bus.lan_id == 0)
hw->phy.sfp_type =
@@ -1087,15 +1087,15 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE1,
- &oui_bytes[1]);
+ IXGBE_SFF_VENDOR_OUI_BYTE1,
+ &oui_bytes[1]);
if (status != 0)
goto err_read_i2c_eeprom;
status = hw->phy.ops.read_i2c_eeprom(hw,
- IXGBE_SFF_VENDOR_OUI_BYTE2,
- &oui_bytes[2]);
+ IXGBE_SFF_VENDOR_OUI_BYTE2,
+ &oui_bytes[2]);
if (status != 0)
goto err_read_i2c_eeprom;
@@ -1403,8 +1403,8 @@ err_read_i2c_eeprom:
* so it returns the offsets to the phy init sequence block.
**/
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset)
+ u16 *list_offset,
+ u16 *data_offset)
{
u16 sfp_id;
u16 sfp_type = hw->phy.sfp_type;
@@ -1493,11 +1493,11 @@ err_phy:
* Performs byte read operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data)
+ u8 *eeprom_data)
{
return hw->phy.ops.read_i2c_byte(hw, byte_offset,
- IXGBE_I2C_EEPROM_DEV_ADDR,
- eeprom_data);
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
}
/**
@@ -1525,11 +1525,11 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
* Performs byte write operation to SFP module's EEPROM over I2C interface.
**/
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 eeprom_data)
+ u8 eeprom_data)
{
return hw->phy.ops.write_i2c_byte(hw, byte_offset,
- IXGBE_I2C_EEPROM_DEV_ADDR,
- eeprom_data);
+ IXGBE_I2C_EEPROM_DEV_ADDR,
+ eeprom_data);
}
/**
@@ -1542,7 +1542,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
* a specified device address.
**/
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data)
+ u8 dev_addr, u8 *data)
{
s32 status = 0;
u32 max_retry = 10;
@@ -1631,7 +1631,7 @@ read_byte_out:
* a specified device address.
**/
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data)
+ u8 dev_addr, u8 data)
{
s32 status = 0;
u32 max_retry = 1;
@@ -2046,7 +2046,7 @@ s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
/* Check that the LASI temp alarm status was triggered */
hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
- MDIO_MMD_PMAPMD, &phy_data);
+ MDIO_MMD_PMAPMD, &phy_data);
if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
goto out;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index 0bb047f..54071ed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -114,47 +114,47 @@ s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw);
s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 *phy_data);
+ u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
- u32 device_type, u16 phy_data);
+ u32 device_type, u16 phy_data);
s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 *phy_data);
s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr,
u32 device_type, u16 phy_data);
s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw);
s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed speed,
- bool autoneg_wait_to_complete);
+ ixgbe_link_speed speed,
+ bool autoneg_wait_to_complete);
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *autoneg);
+ ixgbe_link_speed *speed,
+ bool *autoneg);
bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
- ixgbe_link_speed *speed,
- bool *link_up);
+ ixgbe_link_speed *speed,
+ bool *link_up);
s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw);
s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
- u16 *firmware_version);
+ u16 *firmware_version);
s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
- u16 *firmware_version);
+ u16 *firmware_version);
s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw,
- u16 *list_offset,
- u16 *data_offset);
+ u16 *list_offset,
+ u16 *data_offset);
s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw);
s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 *data);
+ u8 dev_addr, u8 *data);
s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 dev_addr, u8 data);
+ u8 dev_addr, u8 data);
s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 *eeprom_data);
+ u8 *eeprom_data);
s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset,
u8 *sff8472_data);
s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
- u8 eeprom_data);
+ u8 eeprom_data);
#endif /* _IXGBE_PHY_H_ */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 8902ae6..68f87ec 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -26,7 +26,6 @@
*******************************************************************************/
#include "ixgbe.h"
-#include <linux/export.h>
#include <linux/ptp_classify.h>
/*
@@ -334,7 +333,7 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
}
/**
- * ixgbe_ptp_enable
+ * ixgbe_ptp_feature_enable
* @ptp: the ptp clock structure
* @rq: the requested feature to change
* @on: whether to enable or disable the feature
@@ -342,8 +341,8 @@ static int ixgbe_ptp_settime(struct ptp_clock_info *ptp,
* enable (or disable) ancillary features of the phc subsystem.
* our driver only supports the PPS feature on the X540
*/
-static int ixgbe_ptp_enable(struct ptp_clock_info *ptp,
- struct ptp_clock_request *rq, int on)
+static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq, int on)
{
struct ixgbe_adapter *adapter =
container_of(ptp, struct ixgbe_adapter, ptp_caps);
@@ -570,9 +569,9 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
}
/**
- * ixgbe_ptp_set_ts_config - control hardware time stamping
- * @adapter: pointer to adapter struct
- * @ifreq: ioctl data
+ * ixgbe_ptp_set_timestamp_mode - setup the hardware for the requested mode
+ * @adapter: the private ixgbe adapter structure
+ * @config: the hwtstamp configuration requested
*
* Outgoing time stamping can be enabled and disabled. Play nice and
* disable it when requested, although it shouldn't cause any overhead
@@ -590,25 +589,25 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
* packets, regardless of the type specified in the register, only use V2
* Event mode. This more accurately tells the user what the hardware is going
* to do anyways.
+ *
+ * Note: this may modify the hwtstamp configuration towards a more general
+ * mode, if required to support the specifically requested mode.
*/
-int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter,
+ struct hwtstamp_config *config)
{
struct ixgbe_hw *hw = &adapter->hw;
- struct hwtstamp_config config;
u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED;
u32 tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED;
u32 tsync_rx_mtrl = PTP_EV_PORT << 16;
bool is_l2 = false;
u32 regval;
- if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
- return -EFAULT;
-
/* reserved for future extensions */
- if (config.flags)
+ if (config->flags)
return -EINVAL;
- switch (config.tx_type) {
+ switch (config->tx_type) {
case HWTSTAMP_TX_OFF:
tsync_tx_ctl = 0;
case HWTSTAMP_TX_ON:
@@ -617,7 +616,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
return -ERANGE;
}
- switch (config.rx_filter) {
+ switch (config->rx_filter) {
case HWTSTAMP_FILTER_NONE:
tsync_rx_ctl = 0;
tsync_rx_mtrl = 0;
@@ -641,7 +640,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2;
is_l2 = true;
- config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+ config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
break;
case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
case HWTSTAMP_FILTER_ALL:
@@ -652,7 +651,7 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
* Delay_Req messages and hardware does not support
* timestamping all packets => return error
*/
- config.rx_filter = HWTSTAMP_FILTER_NONE;
+ config->rx_filter = HWTSTAMP_FILTER_NONE;
return -ERANGE;
}
@@ -671,7 +670,6 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
else
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), 0);
-
/* enable/disable TX */
regval = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
regval &= ~IXGBE_TSYNCTXCTL_ENABLED;
@@ -693,6 +691,29 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
+ return 0;
+}
+
+/**
+ * ixgbe_ptp_set_ts_config - user entry point for timestamp mode
+ * @adapter: pointer to adapter struct
+ * @ifreq: ioctl data
+ *
+ * Set hardware to requested mode. If unsupported, return an error with no
+ * changes. Otherwise, store the mode for future reference.
+ */
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+{
+ struct hwtstamp_config config;
+ int err;
+
+ if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+ return -EFAULT;
+
+ err = ixgbe_ptp_set_timestamp_mode(adapter, &config);
+ if (err)
+ return err;
+
/* save these settings for future reference */
memcpy(&adapter->tstamp_config, &config,
sizeof(adapter->tstamp_config));
@@ -790,9 +811,13 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
* ixgbe_ptp_reset
* @adapter: the ixgbe private board structure
*
- * When the MAC resets, all timesync features are reset. This function should be
- * called to re-enable the PTP clock structure. It will re-init the timecounter
- * structure based on the kernel time as well as setup the cycle counter data.
+ * When the MAC resets, all the hardware bits for timesync are reset. This
+ * function is used to re-enable the device for PTP based on current settings.
+ * We do lose the current clock time, so just reset the cyclecounter to the
+ * system real clock time.
+ *
+ * This function will maintain hwtstamp_config settings, and resets the SDP
+ * output if it was enabled.
*/
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
{
@@ -804,8 +829,8 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
IXGBE_WRITE_FLUSH(hw);
- /* Reset the saved tstamp_config */
- memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+ /* reset the hardware timestamping mode */
+ ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
ixgbe_ptp_start_cyclecounter(adapter);
@@ -825,16 +850,23 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_ptp_init
+ * ixgbe_ptp_create_clock
* @adapter: the ixgbe private adapter structure
*
- * This function performs the required steps for enabling ptp
- * support. If ptp support has already been loaded it simply calls the
- * cyclecounter init routine and exits.
+ * This function performs setup of the user entry point function table and
+ * initializes the PTP clock device, which is used to access the clock-like
+ * features of the PTP core. It will be called by ixgbe_ptp_init, only if
+ * there isn't already a clock device (such as after a suspend/resume cycle,
+ * where the clock device wasn't destroyed).
*/
-void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
+static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter)
{
struct net_device *netdev = adapter->netdev;
+ long err;
+
+ /* do nothing if we already have a clock device */
+ if (!IS_ERR_OR_NULL(adapter->ptp_clock))
+ return 0;
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
@@ -851,7 +883,7 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
adapter->ptp_caps.settime = ixgbe_ptp_settime;
- adapter->ptp_caps.enable = ixgbe_ptp_enable;
+ adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
case ixgbe_mac_82599EB:
snprintf(adapter->ptp_caps.name,
@@ -867,24 +899,57 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime;
adapter->ptp_caps.gettime = ixgbe_ptp_gettime;
adapter->ptp_caps.settime = ixgbe_ptp_settime;
- adapter->ptp_caps.enable = ixgbe_ptp_enable;
+ adapter->ptp_caps.enable = ixgbe_ptp_feature_enable;
break;
default:
adapter->ptp_clock = NULL;
- return;
+ return -EOPNOTSUPP;
}
- spin_lock_init(&adapter->tmreg_lock);
- INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
-
adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps,
&adapter->pdev->dev);
if (IS_ERR(adapter->ptp_clock)) {
+ err = PTR_ERR(adapter->ptp_clock);
adapter->ptp_clock = NULL;
e_dev_err("ptp_clock_register failed\n");
+ return err;
} else
e_dev_info("registered PHC device on %s\n", netdev->name);
+ /* set default timestamp mode to disabled here. We do this in
+ * create_clock instead of init, because we don't want to override the
+ * previous settings during a resume cycle.
+ */
+ adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+ adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+ return 0;
+}
+
+/**
+ * ixgbe_ptp_init
+ * @adapter: the ixgbe private adapter structure
+ *
+ * This function performs the required steps for enabling PTP
+ * support. If PTP support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
+{
+ /* initialize the spin lock first since we can't control when a user
+ * will call the entry functions once we have initialized the clock
+ * device
+ */
+ spin_lock_init(&adapter->tmreg_lock);
+
+ /* obtain a PTP device, or re-use an existing device */
+ if (ixgbe_ptp_create_clock(adapter))
+ return;
+
+ /* we have a clock so we can initialize work now */
+ INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work);
+
+ /* reset the PTP related hardware bits */
ixgbe_ptp_reset(adapter);
/* enter the IXGBE_PTP_RUNNING state */
@@ -894,28 +959,45 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
}
/**
- * ixgbe_ptp_stop - disable ptp device and stop the overflow check
- * @adapter: pointer to adapter struct
+ * ixgbe_ptp_suspend - stop PTP work items
+ * @ adapter: pointer to adapter struct
*
- * this function stops the ptp support, and cancels the delayed work.
+ * this function suspends PTP activity, and prevents more PTP work from being
+ * generated, but does not destroy the PTP clock device.
*/
-void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
+void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter)
{
/* Leave the IXGBE_PTP_RUNNING state. */
if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state))
return;
- /* stop the PPS signal */
- adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED;
- ixgbe_ptp_setup_sdp(adapter);
+ /* since this might be called in suspend, we don't clear the state,
+ * but simply reset the auxiliary PPS signal control register
+ */
+ IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSAUXC, 0x0);
+ /* ensure that we cancel any pending PTP Tx work item in progress */
cancel_work_sync(&adapter->ptp_tx_work);
if (adapter->ptp_tx_skb) {
dev_kfree_skb_any(adapter->ptp_tx_skb);
adapter->ptp_tx_skb = NULL;
clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state);
}
+}
+
+/**
+ * ixgbe_ptp_stop - close the PTP device
+ * @adapter: pointer to adapter struct
+ *
+ * completely destroy the PTP device, should only be called when the device is
+ * being fully closed.
+ */
+void ixgbe_ptp_stop(struct ixgbe_adapter *adapter)
+{
+ /* first, suspend PTP activity */
+ ixgbe_ptp_suspend(adapter);
+ /* disable the PTP clock device */
if (adapter->ptp_clock) {
ptp_clock_unregister(adapter->ptp_clock);
adapter->ptp_clock = NULL;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index e6c68d3..16b3a1c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -72,8 +72,6 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter)
for (i = 0; i < num_vf_macvlans; i++) {
mv_list->vf = -1;
mv_list->free = true;
- mv_list->rar_entry = hw->mac.num_rar_entries -
- (i + adapter->num_vfs + 1);
list_add(&mv_list->l, &adapter->vf_mvs.l);
mv_list++;
}
@@ -327,6 +325,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
u32 vector_bit;
u32 vector_reg;
u32 mta_reg;
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
/* only so many hash values supported */
entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES);
@@ -353,25 +352,13 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr);
return 0;
}
-static void ixgbe_restore_vf_macvlans(struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- struct list_head *pos;
- struct vf_macvlans *entry;
-
- list_for_each(pos, &adapter->vf_mvs.l) {
- entry = list_entry(pos, struct vf_macvlans, l);
- if (!entry->free)
- hw->mac.ops.set_rar(hw, entry->rar_entry,
- entry->vf_macvlan,
- entry->vf, IXGBE_RAH_AV);
- }
-}
-
+#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
@@ -382,6 +369,7 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
u32 mta_reg;
for (i = 0; i < adapter->num_vfs; i++) {
+ u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(i));
vfinfo = &adapter->vfinfo[i];
for (j = 0; j < vfinfo->num_vf_mc_hashes; j++) {
hw->addr_ctrl.mta_in_use++;
@@ -391,11 +379,18 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter)
mta_reg |= (1 << vector_bit);
IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg);
}
+
+ if (vfinfo->num_vf_mc_hashes)
+ vmolr |= IXGBE_VMOLR_ROMPE;
+ else
+ vmolr &= ~IXGBE_VMOLR_ROMPE;
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(i), vmolr);
}
/* Restore any VF macvlans */
- ixgbe_restore_vf_macvlans(adapter);
+ ixgbe_full_sync_mac_table(adapter);
}
+#endif
static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid,
u32 vf)
@@ -495,8 +490,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf)
static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe)
{
u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf));
- vmolr |= (IXGBE_VMOLR_ROMPE |
- IXGBE_VMOLR_BAM);
+ vmolr |= IXGBE_VMOLR_BAM;
if (aupe)
vmolr |= IXGBE_VMOLR_AUPE;
else
@@ -514,7 +508,6 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
{
struct ixgbe_hw *hw = &adapter->hw;
struct vf_data_storage *vfinfo = &adapter->vfinfo[vf];
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
u8 num_tcs = netdev_get_num_tc(adapter->netdev);
/* add PF assigned VLAN or VLAN 0 */
@@ -544,7 +537,7 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
/* Flush and reset the mta with the new values */
ixgbe_set_rx_mode(adapter->netdev);
- hw->mac.ops.clear_rar(hw, rar_entry);
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
/* reset VF api back to unknown */
adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10;
@@ -553,11 +546,9 @@ static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf)
static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
int vf, unsigned char *mac_addr)
{
- struct ixgbe_hw *hw = &adapter->hw;
- int rar_entry = hw->mac.num_rar_entries - (vf + 1);
-
+ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
- hw->mac.ops.set_rar(hw, rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+ ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf);
return 0;
}
@@ -565,7 +556,6 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
int vf, int index, unsigned char *mac_addr)
{
- struct ixgbe_hw *hw = &adapter->hw;
struct list_head *pos;
struct vf_macvlans *entry;
@@ -576,7 +566,8 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
entry->vf = -1;
entry->free = true;
entry->is_macvlan = false;
- hw->mac.ops.clear_rar(hw, entry->rar_entry);
+ ixgbe_del_mac_filter(adapter,
+ entry->vf_macvlan, vf);
}
}
}
@@ -612,7 +603,7 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter,
entry->vf = vf;
memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN);
- hw->mac.ops.set_rar(hw, entry->rar_entry, mac_addr, vf, IXGBE_RAH_AV);
+ ixgbe_add_mac_filter(adapter, mac_addr, vf);
return 0;
}
@@ -1138,9 +1129,9 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
adapter->vfinfo[vf].vlan_count--;
adapter->vfinfo[vf].pf_vlan = 0;
adapter->vfinfo[vf].pf_qos = 0;
- }
+ }
out:
- return err;
+ return err;
}
static int ixgbe_link_mbps(struct ixgbe_adapter *adapter)
@@ -1231,7 +1222,8 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter)
}
}
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+ int max_tx_rate)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
int link_speed;
@@ -1249,13 +1241,16 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate)
if (link_speed != 10000)
return -EINVAL;
+ if (min_tx_rate)
+ return -EINVAL;
+
/* rate limit cannot be less than 10Mbs or greater than link speed */
- if (tx_rate && ((tx_rate <= 10) || (tx_rate > link_speed)))
+ if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed)))
return -EINVAL;
/* store values */
adapter->vf_rate_link_speed = link_speed;
- adapter->vfinfo[vf].tx_rate = tx_rate;
+ adapter->vfinfo[vf].tx_rate = max_tx_rate;
/* update hardware configuration */
ixgbe_set_vf_rate_limit(adapter, vf);
@@ -1297,7 +1292,8 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev,
return -EINVAL;
ivi->vf = vf;
memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN);
- ivi->tx_rate = adapter->vfinfo[vf].tx_rate;
+ ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate;
+ ivi->min_tx_rate = 0;
ivi->vlan = adapter->vfinfo[vf].pf_vlan;
ivi->qos = adapter->vfinfo[vf].pf_qos;
ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
index 139eadd..32c26d5 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h
@@ -34,7 +34,9 @@
*/
#define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1)
+#ifdef CONFIG_PCI_IOV
void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter);
+#endif
void ixgbe_msg_task(struct ixgbe_adapter *adapter);
int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask);
void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter);
@@ -42,7 +44,8 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter);
int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac);
int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan,
u8 qos);
-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate);
+int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate,
+ int max_tx_rate);
int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting);
int ixgbe_ndo_get_vf_config(struct net_device *netdev,
int vf, struct ifla_vf_info *ivi);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 8a6ff24..9a89f98 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -160,7 +160,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_MAX_EITR 0x00000FF8
#define IXGBE_MIN_EITR 8
#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \
- (0x012300 + (((_i) - 24) * 4)))
+ (0x012300 + (((_i) - 24) * 4)))
#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8
#define IXGBE_EITR_LLI_MOD 0x00008000
#define IXGBE_EITR_CNT_WDIS 0x80000000
@@ -213,7 +213,7 @@ struct ixgbe_thermal_sensor_data {
* 64-127: 0x0D014 + (n-64)*0x40
*/
#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
- (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
+ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
(0x0D014 + (((_i) - 64) * 0x40))))
/*
* Rx DCA Control Register:
@@ -222,11 +222,11 @@ struct ixgbe_thermal_sensor_data {
* 64-127: 0x0D00C + (n-64)*0x40
*/
#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
- (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
+ (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
(0x0D00C + (((_i) - 64) * 0x40))))
#define IXGBE_RDRXCTL 0x02F00
#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4))
- /* 8 of these 0x03C00 - 0x03C1C */
+ /* 8 of these 0x03C00 - 0x03C1C */
#define IXGBE_RXCTRL 0x03000
#define IXGBE_DROPEN 0x03D04
#define IXGBE_RXPBSIZE_SHIFT 10
@@ -239,14 +239,14 @@ struct ixgbe_thermal_sensor_data {
/* Multicast Table Array - 128 entries */
#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4))
#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
- (0x0A200 + ((_i) * 8)))
+ (0x0A200 + ((_i) * 8)))
#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
- (0x0A204 + ((_i) * 8)))
+ (0x0A204 + ((_i) * 8)))
#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8))
#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8))
/* Packet split receive type */
#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \
- (0x0EA00 + ((_i) * 4)))
+ (0x0EA00 + ((_i) * 4)))
/* array of 4096 1-bit vlan filters */
#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4))
/*array of 4096 4-bit vlan vmdq indices */
@@ -696,7 +696,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4))
#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \
- (0x08600 + ((_i) * 4)))
+ (0x08600 + ((_i) * 4)))
#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4))
#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */
@@ -820,7 +820,7 @@ struct ixgbe_thermal_sensor_data {
#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002
#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003
#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \
- IXGBE_GCR_EXT_VT_MODE_64)
+ IXGBE_GCR_EXT_VT_MODE_64)
/* Time Sync Registers */
#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */
@@ -1396,10 +1396,10 @@ enum {
#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */
#define IXGBE_EIMS_ENABLE_MASK ( \
- IXGBE_EIMS_RTX_QUEUE | \
- IXGBE_EIMS_LSC | \
- IXGBE_EIMS_TCP_TIMER | \
- IXGBE_EIMS_OTHER)
+ IXGBE_EIMS_RTX_QUEUE | \
+ IXGBE_EIMS_LSC | \
+ IXGBE_EIMS_TCP_TIMER | \
+ IXGBE_EIMS_OTHER)
/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */
@@ -2161,18 +2161,18 @@ enum {
/* Masks to determine if packets should be dropped due to frame errors */
#define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \
- IXGBE_RXD_ERR_CE | \
- IXGBE_RXD_ERR_LE | \
- IXGBE_RXD_ERR_PE | \
- IXGBE_RXD_ERR_OSE | \
- IXGBE_RXD_ERR_USE)
+ IXGBE_RXD_ERR_CE | \
+ IXGBE_RXD_ERR_LE | \
+ IXGBE_RXD_ERR_PE | \
+ IXGBE_RXD_ERR_OSE | \
+ IXGBE_RXD_ERR_USE)
#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \
- IXGBE_RXDADV_ERR_CE | \
- IXGBE_RXDADV_ERR_LE | \
- IXGBE_RXDADV_ERR_PE | \
- IXGBE_RXDADV_ERR_OSE | \
- IXGBE_RXDADV_ERR_USE)
+ IXGBE_RXDADV_ERR_CE | \
+ IXGBE_RXDADV_ERR_LE | \
+ IXGBE_RXDADV_ERR_PE | \
+ IXGBE_RXDADV_ERR_OSE | \
+ IXGBE_RXDADV_ERR_USE)
/* Multicast bit mask */
#define IXGBE_MCSTCTRL_MFE 0x4
@@ -2393,9 +2393,9 @@ struct ixgbe_adv_tx_context_desc {
#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */
#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */
#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \
- IXGBE_ADVTXD_POPTS_SHIFT)
+ IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \
- IXGBE_ADVTXD_POPTS_SHIFT)
+ IXGBE_ADVTXD_POPTS_SHIFT)
#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */
#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */
@@ -2435,10 +2435,10 @@ typedef u32 ixgbe_link_speed;
#define IXGBE_LINK_SPEED_1GB_FULL 0x0020
#define IXGBE_LINK_SPEED_10GB_FULL 0x0080
#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \
- IXGBE_LINK_SPEED_10GB_FULL)
+ IXGBE_LINK_SPEED_10GB_FULL)
#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \
- IXGBE_LINK_SPEED_1GB_FULL | \
- IXGBE_LINK_SPEED_10GB_FULL)
+ IXGBE_LINK_SPEED_1GB_FULL | \
+ IXGBE_LINK_SPEED_10GB_FULL)
/* Physical layer type */
@@ -2746,7 +2746,7 @@ struct ixgbe_bus_info {
/* Flow control parameters */
struct ixgbe_fc_info {
u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */
- u32 low_water; /* Flow Control Low-water */
+ u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */
u16 pause_time; /* Flow Control Pause timer */
bool send_xon; /* Flow control send XON */
bool strict_ieee; /* Strict IEEE mode */
@@ -2840,7 +2840,7 @@ struct ixgbe_hw;
/* iterator type for walking multicast address lists */
typedef u8* (*ixgbe_mc_addr_itr) (struct ixgbe_hw *hw, u8 **mc_addr_ptr,
- u32 *vmdq);
+ u32 *vmdq);
/* Function pointer table */
struct ixgbe_eeprom_operations {
@@ -2887,7 +2887,7 @@ struct ixgbe_mac_operations {
s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool);
s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool);
s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *,
- bool *);
+ bool *);
/* Packet Buffer Manipulation */
void (*set_rxpba)(struct ixgbe_hw *, int, u32, int);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 188a597..40dd798 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -81,7 +81,7 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
bool autoneg_wait_to_complete)
{
return hw->phy.ops.setup_link_speed(hw, speed,
- autoneg_wait_to_complete);
+ autoneg_wait_to_complete);
}
/**
@@ -155,7 +155,7 @@ mac_reset_top:
/* Add the SAN MAC address to the RAR only if it's a valid address */
if (is_valid_ether_addr(hw->mac.san_addr)) {
hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
- hw->mac.san_addr, 0, IXGBE_RAH_AV);
+ hw->mac.san_addr, 0, IXGBE_RAH_AV);
/* Save the SAN MAC RAR index */
hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1;
@@ -166,7 +166,7 @@ mac_reset_top:
/* Store the alternative WWNN/WWPN prefix */
hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
- &hw->mac.wwpn_prefix);
+ &hw->mac.wwpn_prefix);
reset_hw_out:
return status;
@@ -237,9 +237,9 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
eec = IXGBE_READ_REG(hw, IXGBE_EEC);
eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
- IXGBE_EEC_SIZE_SHIFT);
+ IXGBE_EEC_SIZE_SHIFT);
eeprom->word_size = 1 << (eeprom_size +
- IXGBE_EEPROM_WORD_SIZE_SHIFT);
+ IXGBE_EEPROM_WORD_SIZE_SHIFT);
hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
eeprom->type, eeprom->word_size);
@@ -712,8 +712,7 @@ static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
udelay(50);
}
} else {
- hw_dbg(hw, "Software semaphore SMBI between device drivers "
- "not granted.\n");
+ hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
}
return status;
@@ -813,7 +812,7 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
.get_media_type = &ixgbe_get_media_type_X540,
.get_supported_physical_layer =
- &ixgbe_get_supported_physical_layer_X540,
+ &ixgbe_get_supported_physical_layer_X540,
.enable_rx_dma = &ixgbe_enable_rx_dma_generic,
.get_mac_addr = &ixgbe_get_mac_addr_generic,
.get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
index 1baecb6..d420f12 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c
@@ -135,8 +135,8 @@ static int ixgbevf_get_settings(struct net_device *netdev,
ethtool_cmd_speed_set(ecmd, speed);
ecmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(ecmd, -1);
- ecmd->duplex = -1;
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
return 0;
@@ -813,5 +813,5 @@ static const struct ethtool_ops ixgbevf_ethtool_ops = {
void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
+ netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index de2793b..75467f8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -85,7 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbevf_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl);
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
-MODULE_DESCRIPTION("Intel(R) 82599 Virtual Function Driver");
+MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b7b8d74..b151a94 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -42,6 +42,7 @@
#include <linux/dma-mapping.h>
#include <linux/in.h>
#include <linux/ip.h>
+#include <net/tso.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/etherdevice.h>
@@ -179,10 +180,18 @@ static char mv643xx_eth_driver_version[] = "1.4";
* Misc definitions.
*/
#define DEFAULT_RX_QUEUE_SIZE 128
-#define DEFAULT_TX_QUEUE_SIZE 256
+#define DEFAULT_TX_QUEUE_SIZE 512
#define SKB_DMA_REALIGN ((PAGE_SIZE - NET_SKB_PAD) % SMP_CACHE_BYTES)
+#define TSO_HEADER_SIZE 128
+/* Max number of allowed TCP segments for software TSO */
+#define MV643XX_MAX_TSO_SEGS 100
+#define MV643XX_MAX_SKB_DESCS (MV643XX_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+#define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_dma) && \
+ (addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
/*
* RX/TX descriptors.
*/
@@ -250,6 +259,7 @@ struct tx_desc {
#define GEN_TCP_UDP_CHECKSUM 0x00020000
#define UDP_FRAME 0x00010000
#define MAC_HDR_EXTRA_4_BYTES 0x00008000
+#define GEN_TCP_UDP_CHK_FULL 0x00000400
#define MAC_HDR_EXTRA_8_BYTES 0x00000200
#define TX_IHL_SHIFT 11
@@ -345,6 +355,12 @@ struct tx_queue {
int tx_curr_desc;
int tx_used_desc;
+ int tx_stop_threshold;
+ int tx_wake_threshold;
+
+ char *tso_hdrs;
+ dma_addr_t tso_hdrs_dma;
+
struct tx_desc *tx_desc_area;
dma_addr_t tx_desc_dma;
int tx_desc_area_size;
@@ -491,7 +507,7 @@ static void txq_maybe_wake(struct tx_queue *txq)
if (netif_tx_queue_stopped(nq)) {
__netif_tx_lock(nq, smp_processor_id());
- if (txq->tx_ring_size - txq->tx_desc_count >= MAX_SKB_FRAGS + 1)
+ if (txq->tx_desc_count <= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
__netif_tx_unlock(nq);
}
@@ -661,6 +677,198 @@ static inline unsigned int has_tiny_unaligned_frags(struct sk_buff *skb)
return 0;
}
+static inline __be16 sum16_as_be(__sum16 sum)
+{
+ return (__force __be16)sum;
+}
+
+static int skb_tx_csum(struct mv643xx_eth_private *mp, struct sk_buff *skb,
+ u16 *l4i_chk, u32 *command, int length)
+{
+ int ret;
+ u32 cmd = 0;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ int hdr_len;
+ int tag_bytes;
+
+ BUG_ON(skb->protocol != htons(ETH_P_IP) &&
+ skb->protocol != htons(ETH_P_8021Q));
+
+ hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
+ tag_bytes = hdr_len - ETH_HLEN;
+
+ if (length - hdr_len > mp->shared->tx_csum_limit ||
+ unlikely(tag_bytes & ~12)) {
+ ret = skb_checksum_help(skb);
+ if (!ret)
+ goto no_csum;
+ return ret;
+ }
+
+ if (tag_bytes & 4)
+ cmd |= MAC_HDR_EXTRA_4_BYTES;
+ if (tag_bytes & 8)
+ cmd |= MAC_HDR_EXTRA_8_BYTES;
+
+ cmd |= GEN_TCP_UDP_CHECKSUM | GEN_TCP_UDP_CHK_FULL |
+ GEN_IP_V4_CHECKSUM |
+ ip_hdr(skb)->ihl << TX_IHL_SHIFT;
+
+ /* TODO: Revisit this. With the usage of GEN_TCP_UDP_CHK_FULL
+ * it seems we don't need to pass the initial checksum. */
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_UDP:
+ cmd |= UDP_FRAME;
+ *l4i_chk = 0;
+ break;
+ case IPPROTO_TCP:
+ *l4i_chk = 0;
+ break;
+ default:
+ WARN(1, "protocol not supported");
+ }
+ } else {
+no_csum:
+ /* Errata BTS #50, IHL must be 5 if no HW checksum */
+ cmd |= 5 << TX_IHL_SHIFT;
+ }
+ *command = cmd;
+ return 0;
+}
+
+static inline int
+txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
+ struct sk_buff *skb, char *data, int length,
+ bool last_tcp, bool is_last)
+{
+ int tx_index;
+ u32 cmd_sts;
+ struct tx_desc *desc;
+
+ tx_index = txq->tx_curr_desc++;
+ if (txq->tx_curr_desc == txq->tx_ring_size)
+ txq->tx_curr_desc = 0;
+ desc = &txq->tx_desc_area[tx_index];
+
+ desc->l4i_chk = 0;
+ desc->byte_cnt = length;
+ desc->buf_ptr = dma_map_single(dev->dev.parent, data,
+ length, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent, desc->buf_ptr))) {
+ WARN(1, "dma_map_single failed!\n");
+ return -ENOMEM;
+ }
+
+ cmd_sts = BUFFER_OWNED_BY_DMA;
+ if (last_tcp) {
+ /* last descriptor in the TCP packet */
+ cmd_sts |= ZERO_PADDING | TX_LAST_DESC;
+ /* last descriptor in SKB */
+ if (is_last)
+ cmd_sts |= TX_ENABLE_INTERRUPT;
+ }
+ desc->cmd_sts = cmd_sts;
+ return 0;
+}
+
+static inline void
+txq_put_hdr_tso(struct sk_buff *skb, struct tx_queue *txq, int length)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int tx_index;
+ struct tx_desc *desc;
+ int ret;
+ u32 cmd_csum = 0;
+ u16 l4i_chk = 0;
+
+ tx_index = txq->tx_curr_desc;
+ desc = &txq->tx_desc_area[tx_index];
+
+ ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_csum, length);
+ if (ret)
+ WARN(1, "failed to prepare checksum!");
+
+ /* Should we set this? Can't use the value from skb_tx_csum()
+ * as it's not the correct initial L4 checksum to use. */
+ desc->l4i_chk = 0;
+
+ desc->byte_cnt = hdr_len;
+ desc->buf_ptr = txq->tso_hdrs_dma +
+ txq->tx_curr_desc * TSO_HEADER_SIZE;
+ desc->cmd_sts = cmd_csum | BUFFER_OWNED_BY_DMA | TX_FIRST_DESC |
+ GEN_CRC;
+
+ txq->tx_curr_desc++;
+ if (txq->tx_curr_desc == txq->tx_ring_size)
+ txq->tx_curr_desc = 0;
+}
+
+static int txq_submit_tso(struct tx_queue *txq, struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct mv643xx_eth_private *mp = txq_to_mp(txq);
+ int total_len, data_left, ret;
+ int desc_count = 0;
+ struct tso_t tso;
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ /* Count needed descriptors */
+ if ((txq->tx_desc_count + tso_count_descs(skb)) >= txq->tx_ring_size) {
+ netdev_dbg(dev, "not enough descriptors for TSO!\n");
+ return -EBUSY;
+ }
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ desc_count++;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = txq->tso_hdrs + txq->tx_curr_desc * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+ txq_put_hdr_tso(skb, txq, data_left);
+
+ while (data_left > 0) {
+ int size;
+ desc_count++;
+
+ size = min_t(int, tso.size, data_left);
+ ret = txq_put_data_tso(dev, txq, skb, tso.data, size,
+ size == data_left,
+ total_len == 0);
+ if (ret)
+ goto err_release;
+ data_left -= size;
+ tso_build_data(skb, &tso, size);
+ }
+ }
+
+ __skb_queue_tail(&txq->tx_skb, skb);
+ skb_tx_timestamp(skb);
+
+ /* clear TX_END status */
+ mp->work_tx_end &= ~(1 << txq->index);
+
+ /* ensure all descriptors are written before poking hardware */
+ wmb();
+ txq_enable(txq);
+ txq->tx_desc_count += desc_count;
+ return 0;
+err_release:
+ /* TODO: Release all used data descriptors; header descriptors must not
+ * be DMA-unmapped.
+ */
+ return ret;
+}
+
static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
@@ -671,8 +879,10 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
skb_frag_t *this_frag;
int tx_index;
struct tx_desc *desc;
+ void *addr;
this_frag = &skb_shinfo(skb)->frags[frag];
+ addr = page_address(this_frag->page.p) + this_frag->page_offset;
tx_index = txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
@@ -692,19 +902,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
desc->l4i_chk = 0;
desc->byte_cnt = skb_frag_size(this_frag);
- desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
- this_frag, 0,
- skb_frag_size(this_frag),
- DMA_TO_DEVICE);
+ desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
+ desc->byte_cnt, DMA_TO_DEVICE);
}
}
-static inline __be16 sum16_as_be(__sum16 sum)
-{
- return (__force __be16)sum;
-}
-
-static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
+static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
+ struct net_device *dev)
{
struct mv643xx_eth_private *mp = txq_to_mp(txq);
int nr_frags = skb_shinfo(skb)->nr_frags;
@@ -712,54 +916,22 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
struct tx_desc *desc;
u32 cmd_sts;
u16 l4i_chk;
- int length;
+ int length, ret;
- cmd_sts = TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+ cmd_sts = 0;
l4i_chk = 0;
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- int hdr_len;
- int tag_bytes;
-
- BUG_ON(skb->protocol != htons(ETH_P_IP) &&
- skb->protocol != htons(ETH_P_8021Q));
-
- hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
- tag_bytes = hdr_len - ETH_HLEN;
- if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
- unlikely(tag_bytes & ~12)) {
- if (skb_checksum_help(skb) == 0)
- goto no_csum;
- dev_kfree_skb_any(skb);
- return 1;
- }
-
- if (tag_bytes & 4)
- cmd_sts |= MAC_HDR_EXTRA_4_BYTES;
- if (tag_bytes & 8)
- cmd_sts |= MAC_HDR_EXTRA_8_BYTES;
-
- cmd_sts |= GEN_TCP_UDP_CHECKSUM |
- GEN_IP_V4_CHECKSUM |
- ip_hdr(skb)->ihl << TX_IHL_SHIFT;
-
- switch (ip_hdr(skb)->protocol) {
- case IPPROTO_UDP:
- cmd_sts |= UDP_FRAME;
- l4i_chk = ntohs(sum16_as_be(udp_hdr(skb)->check));
- break;
- case IPPROTO_TCP:
- l4i_chk = ntohs(sum16_as_be(tcp_hdr(skb)->check));
- break;
- default:
- BUG();
- }
- } else {
-no_csum:
- /* Errata BTS #50, IHL must be 5 if no HW checksum */
- cmd_sts |= 5 << TX_IHL_SHIFT;
+ if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
+ if (net_ratelimit())
+ netdev_err(dev, "tx queue full?!\n");
+ return -EBUSY;
}
+ ret = skb_tx_csum(mp, skb, &l4i_chk, &cmd_sts, skb->len);
+ if (ret)
+ return ret;
+ cmd_sts |= TX_FIRST_DESC | GEN_CRC | BUFFER_OWNED_BY_DMA;
+
tx_index = txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
@@ -801,7 +973,7 @@ no_csum:
static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct mv643xx_eth_private *mp = netdev_priv(dev);
- int length, queue;
+ int length, queue, ret;
struct tx_queue *txq;
struct netdev_queue *nq;
@@ -810,30 +982,26 @@ static netdev_tx_t mv643xx_eth_xmit(struct sk_buff *skb, struct net_device *dev)
nq = netdev_get_tx_queue(dev, queue);
if (has_tiny_unaligned_frags(skb) && __skb_linearize(skb)) {
- txq->tx_dropped++;
netdev_printk(KERN_DEBUG, dev,
"failed to linearize skb with tiny unaligned fragment\n");
return NETDEV_TX_BUSY;
}
- if (txq->tx_ring_size - txq->tx_desc_count < MAX_SKB_FRAGS + 1) {
- if (net_ratelimit())
- netdev_err(dev, "tx queue full?!\n");
- dev_kfree_skb_any(skb);
- return NETDEV_TX_OK;
- }
-
length = skb->len;
- if (!txq_submit_skb(txq, skb)) {
- int entries_left;
-
+ if (skb_is_gso(skb))
+ ret = txq_submit_tso(txq, skb, dev);
+ else
+ ret = txq_submit_skb(txq, skb, dev);
+ if (!ret) {
txq->tx_bytes += length;
txq->tx_packets++;
- entries_left = txq->tx_ring_size - txq->tx_desc_count;
- if (entries_left < MAX_SKB_FRAGS + 1)
+ if (txq->tx_desc_count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
+ } else {
+ txq->tx_dropped++;
+ dev_kfree_skb_any(skb);
}
return NETDEV_TX_OK;
@@ -907,14 +1075,9 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
mp->dev->stats.tx_errors++;
}
- if (cmd_sts & TX_FIRST_DESC) {
+ if (!IS_TSO_HEADER(txq, desc->buf_ptr))
dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
desc->byte_cnt, DMA_TO_DEVICE);
- } else {
- dma_unmap_page(mp->dev->dev.parent, desc->buf_ptr,
- desc->byte_cnt, DMA_TO_DEVICE);
- }
-
dev_kfree_skb(skb);
}
@@ -1010,8 +1173,9 @@ static void txq_set_fixed_prio_mode(struct tx_queue *txq)
/* mii management interface *************************************************/
-static void mv643xx_adjust_pscr(struct mv643xx_eth_private *mp)
+static void mv643xx_eth_adjust_link(struct net_device *dev)
{
+ struct mv643xx_eth_private *mp = netdev_priv(dev);
u32 pscr = rdlp(mp, PORT_SERIAL_CONTROL);
u32 autoneg_disable = FORCE_LINK_PASS |
DISABLE_AUTO_NEG_SPEED_GMII |
@@ -1387,7 +1551,7 @@ mv643xx_eth_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ret = phy_ethtool_sset(mp->phy, cmd);
if (!ret)
- mv643xx_adjust_pscr(mp);
+ mv643xx_eth_adjust_link(dev);
return ret;
}
@@ -1456,7 +1620,11 @@ mv643xx_eth_set_ringparam(struct net_device *dev, struct ethtool_ringparam *er)
return -EINVAL;
mp->rx_ring_size = er->rx_pending < 4096 ? er->rx_pending : 4096;
- mp->tx_ring_size = er->tx_pending < 4096 ? er->tx_pending : 4096;
+ mp->tx_ring_size = clamp_t(unsigned int, er->tx_pending,
+ MV643XX_MAX_SKB_DESCS * 2, 4096);
+ if (mp->tx_ring_size != er->tx_pending)
+ netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+ mp->tx_ring_size, er->tx_pending);
if (netif_running(dev)) {
mv643xx_eth_stop(dev);
@@ -1832,6 +2000,13 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
txq->tx_ring_size = mp->tx_ring_size;
+ /* A queue must always have room for at least one skb.
+ * Therefore, stop the queue when the free entries reaches
+ * the maximum number of descriptors per skb.
+ */
+ txq->tx_stop_threshold = txq->tx_ring_size - MV643XX_MAX_SKB_DESCS;
+ txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
txq->tx_desc_count = 0;
txq->tx_curr_desc = 0;
txq->tx_used_desc = 0;
@@ -1871,6 +2046,15 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
nexti * sizeof(struct tx_desc);
}
+ /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+ txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ &txq->tso_hdrs_dma, GFP_KERNEL);
+ if (txq->tso_hdrs == NULL) {
+ dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+ txq->tx_desc_area, txq->tx_desc_dma);
+ return -ENOMEM;
+ }
skb_queue_head_init(&txq->tx_skb);
return 0;
@@ -1891,6 +2075,10 @@ static void txq_deinit(struct tx_queue *txq)
else
dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
txq->tx_desc_area, txq->tx_desc_dma);
+ if (txq->tso_hdrs)
+ dma_free_coherent(mp->dev->dev.parent,
+ txq->tx_ring_size * TSO_HEADER_SIZE,
+ txq->tso_hdrs, txq->tso_hdrs_dma);
}
@@ -2303,7 +2491,7 @@ static int mv643xx_eth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
ret = phy_mii_ioctl(mp->phy, ifr, cmd);
if (!ret)
- mv643xx_adjust_pscr(mp);
+ mv643xx_eth_adjust_link(dev);
return ret;
}
@@ -2678,6 +2866,7 @@ static void set_params(struct mv643xx_eth_private *mp,
struct mv643xx_eth_platform_data *pd)
{
struct net_device *dev = mp->dev;
+ unsigned int tx_ring_size;
if (is_valid_ether_addr(pd->mac_addr))
memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
@@ -2692,22 +2881,22 @@ static void set_params(struct mv643xx_eth_private *mp,
mp->rxq_count = pd->rx_queue_count ? : 1;
- mp->tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
+ tx_ring_size = DEFAULT_TX_QUEUE_SIZE;
if (pd->tx_queue_size)
- mp->tx_ring_size = pd->tx_queue_size;
+ tx_ring_size = pd->tx_queue_size;
+
+ mp->tx_ring_size = clamp_t(unsigned int, tx_ring_size,
+ MV643XX_MAX_SKB_DESCS * 2, 4096);
+ if (mp->tx_ring_size != tx_ring_size)
+ netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+ mp->tx_ring_size, tx_ring_size);
+
mp->tx_desc_sram_addr = pd->tx_sram_addr;
mp->tx_desc_sram_size = pd->tx_sram_size;
mp->txq_count = pd->tx_queue_count ? : 1;
}
-static void mv643xx_eth_adjust_link(struct net_device *dev)
-{
- struct mv643xx_eth_private *mp = netdev_priv(dev);
-
- mv643xx_adjust_pscr(mp);
-}
-
static struct phy_device *phy_scan(struct mv643xx_eth_private *mp,
int phy_addr)
{
@@ -2889,7 +3078,7 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
if (err)
goto out;
- SET_ETHTOOL_OPS(dev, &mv643xx_eth_ethtool_ops);
+ dev->ethtool_ops = &mv643xx_eth_ethtool_ops;
init_pscr(mp, pd->speed, pd->duplex);
@@ -2921,11 +3110,14 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
dev->watchdog_timeo = 2 * HZ;
dev->base_addr = 0;
- dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
- dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ dev->vlan_features = dev->features;
+
+ dev->features |= NETIF_F_RXCSUM;
+ dev->hw_features = dev->features;
dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->gso_max_segs = MV643XX_MAX_TSO_SEGS;
SET_NETDEV_DEV(dev, &pdev->dev);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 9d5ced2..fc2fb25 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -195,11 +195,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
return -ENODEV;
}
- bus = mdiobus_alloc_size(sizeof(struct orion_mdio_dev));
- if (!bus) {
- dev_err(&pdev->dev, "Cannot allocate MDIO bus\n");
+ bus = devm_mdiobus_alloc_size(&pdev->dev,
+ sizeof(struct orion_mdio_dev));
+ if (!bus)
return -ENOMEM;
- }
bus->name = "orion_mdio_bus";
bus->read = orion_mdio_read;
@@ -208,11 +207,10 @@ static int orion_mdio_probe(struct platform_device *pdev)
dev_name(&pdev->dev));
bus->parent = &pdev->dev;
- bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
- if (!bus->irq) {
- mdiobus_free(bus);
+ bus->irq = devm_kmalloc_array(&pdev->dev, PHY_MAX_ADDR, sizeof(int),
+ GFP_KERNEL);
+ if (!bus->irq)
return -ENOMEM;
- }
for (i = 0; i < PHY_MAX_ADDR; i++)
bus->irq[i] = PHY_POLL;
@@ -264,8 +262,6 @@ static int orion_mdio_probe(struct platform_device *pdev)
out_mdio:
if (!IS_ERR(dev->clk))
clk_disable_unprepare(dev->clk);
- kfree(bus->irq);
- mdiobus_free(bus);
return ret;
}
@@ -276,8 +272,6 @@ static int orion_mdio_remove(struct platform_device *pdev)
writel(0, dev->regs + MVMDIO_ERR_INT_MASK);
mdiobus_unregister(bus);
- kfree(bus->irq);
- mdiobus_free(bus);
if (!IS_ERR(dev->clk))
clk_disable_unprepare(dev->clk);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 14786c8..45beca1 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -23,6 +23,7 @@
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
+#include <net/tso.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
@@ -218,9 +219,6 @@
#define MVNETA_RX_COAL_PKTS 32
#define MVNETA_RX_COAL_USEC 100
-/* Napi polling weight */
-#define MVNETA_RX_POLL_WEIGHT 64
-
/* The two bytes Marvell header. Either contains a special value used
* by Marvell switches when a specific hardware mode is enabled (not
* supported by this driver) or is filled automatically by zeroes on
@@ -244,12 +242,20 @@
#define MVNETA_TX_MTU_MAX 0x3ffff
+/* TSO header size */
+#define TSO_HEADER_SIZE 128
+
/* Max number of Rx descriptors */
#define MVNETA_MAX_RXD 128
/* Max number of Tx descriptors */
#define MVNETA_MAX_TXD 532
+/* Max number of allowed TCP segments for software TSO */
+#define MVNETA_MAX_TSO_SEGS 100
+
+#define MVNETA_MAX_SKB_DESCS (MVNETA_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
/* descriptor aligned size */
#define MVNETA_DESC_ALIGNED_SIZE 32
@@ -258,6 +264,10 @@
ETH_HLEN + ETH_FCS_LEN, \
MVNETA_CPU_D_CACHE_LINE_SIZE)
+#define IS_TSO_HEADER(txq, addr) \
+ ((addr >= txq->tso_hdrs_phys) && \
+ (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE))
+
#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD)
struct mvneta_pcpu_stats {
@@ -279,9 +289,6 @@ struct mvneta_port {
u32 cause_rx_tx;
struct napi_struct napi;
- /* Napi weight */
- int weight;
-
/* Core clock */
struct clk *clk;
u8 mcast_count[256];
@@ -390,6 +397,8 @@ struct mvneta_tx_queue {
* descriptor ring
*/
int count;
+ int tx_stop_threshold;
+ int tx_wake_threshold;
/* Array of transmitted skb */
struct sk_buff **tx_skb;
@@ -413,6 +422,12 @@ struct mvneta_tx_queue {
/* Index of the next TX DMA descriptor to process */
int next_desc_to_proc;
+
+ /* DMA buffers for TSO headers */
+ char *tso_hdrs;
+
+ /* DMA address of TSO headers */
+ dma_addr_t tso_hdrs_phys;
};
struct mvneta_rx_queue {
@@ -441,7 +456,10 @@ struct mvneta_rx_queue {
int next_desc_to_proc;
};
-static int rxq_number = 8;
+/* The hardware supports eight (8) rx queues, but we are only allowing
+ * the first one to be used. Therefore, let's just allocate one queue.
+ */
+static int rxq_number = 1;
static int txq_number = 8;
static int rxq_def;
@@ -1277,11 +1295,12 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
mvneta_txq_inc_get(txq);
+ if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+ dma_unmap_single(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size, DMA_TO_DEVICE);
if (!skb)
continue;
-
- dma_unmap_single(pp->dev->dev.parent, tx_desc->buf_phys_addr,
- tx_desc->data_size, DMA_TO_DEVICE);
dev_kfree_skb_any(skb);
}
}
@@ -1302,7 +1321,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
txq->count -= tx_done;
if (netif_tx_queue_stopped(nq)) {
- if (txq->size - txq->count >= MAX_SKB_FRAGS + 1)
+ if (txq->count <= txq->tx_wake_threshold)
netif_tx_wake_queue(nq);
}
}
@@ -1519,14 +1538,134 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
return rx_done;
}
+static inline void
+mvneta_tso_put_hdr(struct sk_buff *skb,
+ struct mvneta_port *pp, struct mvneta_tx_queue *txq)
+{
+ struct mvneta_tx_desc *tx_desc;
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+
+ txq->tx_skb[txq->txq_put_index] = NULL;
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ tx_desc->data_size = hdr_len;
+ tx_desc->command = mvneta_skb_tx_csum(pp, skb);
+ tx_desc->command |= MVNETA_TXD_F_DESC;
+ tx_desc->buf_phys_addr = txq->tso_hdrs_phys +
+ txq->txq_put_index * TSO_HEADER_SIZE;
+ mvneta_txq_inc_put(txq);
+}
+
+static inline int
+mvneta_tso_put_data(struct net_device *dev, struct mvneta_tx_queue *txq,
+ struct sk_buff *skb, char *data, int size,
+ bool last_tcp, bool is_last)
+{
+ struct mvneta_tx_desc *tx_desc;
+
+ tx_desc = mvneta_txq_next_desc_get(txq);
+ tx_desc->data_size = size;
+ tx_desc->buf_phys_addr = dma_map_single(dev->dev.parent, data,
+ size, DMA_TO_DEVICE);
+ if (unlikely(dma_mapping_error(dev->dev.parent,
+ tx_desc->buf_phys_addr))) {
+ mvneta_txq_desc_put(txq);
+ return -ENOMEM;
+ }
+
+ tx_desc->command = 0;
+ txq->tx_skb[txq->txq_put_index] = NULL;
+
+ if (last_tcp) {
+ /* last descriptor in the TCP packet */
+ tx_desc->command = MVNETA_TXD_L_DESC;
+
+ /* last descriptor in SKB */
+ if (is_last)
+ txq->tx_skb[txq->txq_put_index] = skb;
+ }
+ mvneta_txq_inc_put(txq);
+ return 0;
+}
+
+static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev,
+ struct mvneta_tx_queue *txq)
+{
+ int total_len, data_left;
+ int desc_count = 0;
+ struct mvneta_port *pp = netdev_priv(dev);
+ struct tso_t tso;
+ int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+ int i;
+
+ /* Count needed descriptors */
+ if ((txq->count + tso_count_descs(skb)) >= txq->size)
+ return 0;
+
+ if (skb_headlen(skb) < hdr_len) {
+ pr_info("mvneta: skb linear data shorter than the MAC/IP/TCP headers\n");
+ return 0;
+ }
+
+ /* Initialize the TSO handler, and prepare the first payload */
+ tso_start(skb, &tso);
+
+ total_len = skb->len - hdr_len;
+ while (total_len > 0) {
+ char *hdr;
+
+ data_left = min_t(int, skb_shinfo(skb)->gso_size, total_len);
+ total_len -= data_left;
+ desc_count++;
+
+ /* prepare packet headers: MAC + IP + TCP */
+ hdr = txq->tso_hdrs + txq->txq_put_index * TSO_HEADER_SIZE;
+ tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
+
+ mvneta_tso_put_hdr(skb, pp, txq);
+
+ while (data_left > 0) {
+ int size;
+ desc_count++;
+
+ size = min_t(int, tso.size, data_left);
+
+ if (mvneta_tso_put_data(dev, txq, skb,
+ tso.data, size,
+ size == data_left,
+ total_len == 0))
+ goto err_release;
+ data_left -= size;
+
+ tso_build_data(skb, &tso, size);
+ }
+ }
+
+ return desc_count;
+
+err_release:
+ /* Release all used data descriptors; header descriptors must not
+ * be DMA-unmapped.
+ */
+ for (i = desc_count - 1; i >= 0; i--) {
+ struct mvneta_tx_desc *tx_desc = txq->descs + i;
+ if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
+ dma_unmap_single(pp->dev->dev.parent,
+ tx_desc->buf_phys_addr,
+ tx_desc->data_size,
+ DMA_TO_DEVICE);
+ mvneta_txq_desc_put(txq);
+ }
+ return 0;
+}
+
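
mvneta_tx_tso() above is built on the generic software-TSO helpers (<net/tso.h>: tso_count_descs(), tso_start(), tso_build_hdr(), tso_build_data()) that this merge introduces for mvneta, mv643xx_eth and fec. The sketch below is a hedged outline of that flow with the descriptor handling abstracted away; ex_put_hdr() and ex_put_data() are hypothetical stand-ins for the driver-specific parts, and a real driver builds each header into a DMA-able buffer (as mvneta does with its per-queue tso_hdrs area) rather than on the stack:

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/tso.h>

static int ex_xmit_tso(struct sk_buff *skb)
{
	int hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	int total_len = skb->len - hdr_len;
	char hdr[TSO_HEADER_SIZE];
	struct tso_t tso;

	tso_start(skb, &tso);
	while (total_len > 0) {
		int data_left = min_t(int, skb_shinfo(skb)->gso_size,
				      total_len);

		total_len -= data_left;
		/* rebuild MAC/IP/TCP headers for this segment */
		tso_build_hdr(skb, hdr, &tso, data_left, total_len == 0);
		ex_put_hdr(hdr, hdr_len);		/* hypothetical */

		while (data_left > 0) {
			int size = min_t(int, tso.size, data_left);

			ex_put_data(tso.data, size);	/* hypothetical */
			data_left -= size;
			/* advance the payload cursor, possibly into the
			 * next page fragment
			 */
			tso_build_data(skb, &tso, size);
		}
	}
	return 0;
}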
/* Handle tx fragmentation processing */
static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
struct mvneta_tx_queue *txq)
{
struct mvneta_tx_desc *tx_desc;
- int i;
+ int i, nr_frags = skb_shinfo(skb)->nr_frags;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ for (i = 0; i < nr_frags; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
void *addr = page_address(frag->page.p) + frag->page_offset;
@@ -1543,20 +1682,16 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
goto error;
}
- if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+ if (i == nr_frags - 1) {
/* Last descriptor */
tx_desc->command = MVNETA_TXD_L_DESC | MVNETA_TXD_Z_PAD;
-
txq->tx_skb[txq->txq_put_index] = skb;
-
- mvneta_txq_inc_put(txq);
} else {
/* Descriptor in the middle: Not First, Not Last */
tx_desc->command = 0;
-
txq->tx_skb[txq->txq_put_index] = NULL;
- mvneta_txq_inc_put(txq);
}
+ mvneta_txq_inc_put(txq);
}
return 0;
@@ -1584,15 +1719,18 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
u16 txq_id = skb_get_queue_mapping(skb);
struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
struct mvneta_tx_desc *tx_desc;
- struct netdev_queue *nq;
int frags = 0;
u32 tx_cmd;
if (!netif_running(dev))
goto out;
+ if (skb_is_gso(skb)) {
+ frags = mvneta_tx_tso(skb, dev, txq);
+ goto out;
+ }
+
frags = skb_shinfo(skb)->nr_frags + 1;
- nq = netdev_get_tx_queue(dev, txq_id);
/* Get a descriptor for the first part of the packet */
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -1635,15 +1773,16 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
}
}
- txq->count += frags;
- mvneta_txq_pend_desc_add(pp, txq, frags);
-
- if (txq->size - txq->count < MAX_SKB_FRAGS + 1)
- netif_tx_stop_queue(nq);
-
out:
if (frags > 0) {
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
+ struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+ txq->count += frags;
+ mvneta_txq_pend_desc_add(pp, txq, frags);
+
+ if (txq->count >= txq->tx_stop_threshold)
+ netif_tx_stop_queue(nq);
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
@@ -2003,7 +2142,7 @@ static void mvneta_tx_reset(struct mvneta_port *pp)
{
int queue;
- /* free the skb's in the hal tx ring */
+ /* free the skb's in the tx ring */
for (queue = 0; queue < txq_number; queue++)
mvneta_txq_done_force(pp, &pp->txqs[queue]);
@@ -2081,6 +2220,14 @@ static int mvneta_txq_init(struct mvneta_port *pp,
{
txq->size = pp->tx_ring_size;
+ /* A queue must always have room for at least one skb.
+ * Therefore, stop the queue when the number of free entries
+ * drops to the maximum number of descriptors per skb.
+ */
+ txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
+ txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
+
/* Allocate memory for TX descriptors */
txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2109,6 +2256,18 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->descs, txq->descs_phys);
return -ENOMEM;
}
+
+ /* Allocate DMA buffers for TSO MAC/IP/TCP headers */
+ txq->tso_hdrs = dma_alloc_coherent(pp->dev->dev.parent,
+ txq->size * TSO_HEADER_SIZE,
+ &txq->tso_hdrs_phys, GFP_KERNEL);
+ if (txq->tso_hdrs == NULL) {
+ kfree(txq->tx_skb);
+ dma_free_coherent(pp->dev->dev.parent,
+ txq->size * MVNETA_DESC_ALIGNED_SIZE,
+ txq->descs, txq->descs_phys);
+ return -ENOMEM;
+ }
mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
return 0;
@@ -2120,6 +2279,10 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
{
kfree(txq->tx_skb);
+ if (txq->tso_hdrs)
+ dma_free_coherent(pp->dev->dev.parent,
+ txq->size * TSO_HEADER_SIZE,
+ txq->tso_hdrs, txq->tso_hdrs_phys);
if (txq->descs)
dma_free_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2279,24 +2442,28 @@ static int mvneta_change_mtu(struct net_device *dev, int mtu)
return 0;
/* The interface is running, so we have to force a
- * reallocation of the RXQs
+ * reallocation of the queues
*/
mvneta_stop_dev(pp);
mvneta_cleanup_txqs(pp);
mvneta_cleanup_rxqs(pp);
- pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
+ pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu);
pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
ret = mvneta_setup_rxqs(pp);
if (ret) {
- netdev_err(pp->dev, "unable to setup rxqs after MTU change\n");
+ netdev_err(dev, "unable to setup rxqs after MTU change\n");
return ret;
}
- mvneta_setup_txqs(pp);
+ ret = mvneta_setup_txqs(pp);
+ if (ret) {
+ netdev_err(dev, "unable to setup txqs after MTU change\n");
+ return ret;
+ }
mvneta_start_dev(pp);
mvneta_port_up(pp);
@@ -2323,22 +2490,19 @@ static void mvneta_get_mac_addr(struct mvneta_port *pp, unsigned char *addr)
static int mvneta_set_mac_addr(struct net_device *dev, void *addr)
{
struct mvneta_port *pp = netdev_priv(dev);
- u8 *mac = addr + 2;
- int i;
-
- if (netif_running(dev))
- return -EBUSY;
+ struct sockaddr *sockaddr = addr;
+ int ret;
+ ret = eth_prepare_mac_addr_change(dev, addr);
+ if (ret < 0)
+ return ret;
/* Remove previous address table entry */
mvneta_mac_addr_set(pp, dev->dev_addr, -1);
/* Set new addr in hw */
- mvneta_mac_addr_set(pp, mac, rxq_def);
-
- /* Set addr in the device */
- for (i = 0; i < ETH_ALEN; i++)
- dev->dev_addr[i] = mac[i];
+ mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def);
+ eth_commit_mac_addr_change(dev, addr);
return 0;
}
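
The rewrite of mvneta_set_mac_addr() above swaps open-coded validation and byte copying for the eth_prepare_mac_addr_change()/eth_commit_mac_addr_change() pair, which also drops the old hard -EBUSY for a running interface (prepare only rejects the change while running when the driver does not set IFF_LIVE_ADDR_CHANGE). A hedged sketch of the general pattern, with ex_hw_set_mac() as a hypothetical hardware update:

static int ex_set_mac_address(struct net_device *dev, void *addr)
{
	struct sockaddr *sa = addr;
	int ret;

	/* validates the address, and returns -EBUSY while running
	 * unless the driver sets IFF_LIVE_ADDR_CHANGE
	 */
	ret = eth_prepare_mac_addr_change(dev, addr);
	if (ret < 0)
		return ret;

	ex_hw_set_mac(dev, sa->sa_data);	/* hypothetical */

	eth_commit_mac_addr_change(dev, addr);	/* copies into dev->dev_addr */
	return 0;
}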
@@ -2433,8 +2597,6 @@ static int mvneta_open(struct net_device *dev)
struct mvneta_port *pp = netdev_priv(dev);
int ret;
- mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def);
-
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -2600,8 +2762,12 @@ static int mvneta_ethtool_set_ringparam(struct net_device *dev,
return -EINVAL;
pp->rx_ring_size = ring->rx_pending < MVNETA_MAX_RXD ?
ring->rx_pending : MVNETA_MAX_RXD;
- pp->tx_ring_size = ring->tx_pending < MVNETA_MAX_TXD ?
- ring->tx_pending : MVNETA_MAX_TXD;
+
+ pp->tx_ring_size = clamp_t(u16, ring->tx_pending,
+ MVNETA_MAX_SKB_DESCS * 2, MVNETA_MAX_TXD);
+ if (pp->tx_ring_size != ring->tx_pending)
+ netdev_warn(dev, "TX queue size set to %u (requested %u)\n",
+ pp->tx_ring_size, ring->tx_pending);
if (netif_running(dev)) {
mvneta_stop(dev);
@@ -2638,7 +2804,7 @@ const struct ethtool_ops mvneta_eth_tool_ops = {
};
/* Initialize hw */
-static int mvneta_init(struct mvneta_port *pp, int phy_addr)
+static int mvneta_init(struct device *dev, struct mvneta_port *pp)
{
int queue;
@@ -2648,8 +2814,8 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
/* Set port default values */
mvneta_defaults_set(pp);
- pp->txqs = kzalloc(txq_number * sizeof(struct mvneta_tx_queue),
- GFP_KERNEL);
+ pp->txqs = devm_kcalloc(dev, txq_number, sizeof(struct mvneta_tx_queue),
+ GFP_KERNEL);
if (!pp->txqs)
return -ENOMEM;
@@ -2661,12 +2827,10 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
txq->done_pkts_coal = MVNETA_TXDONE_COAL_PKTS;
}
- pp->rxqs = kzalloc(rxq_number * sizeof(struct mvneta_rx_queue),
- GFP_KERNEL);
- if (!pp->rxqs) {
- kfree(pp->txqs);
+ pp->rxqs = devm_kcalloc(dev, rxq_number, sizeof(struct mvneta_rx_queue),
+ GFP_KERNEL);
+ if (!pp->rxqs)
return -ENOMEM;
- }
/* Create Rx descriptor rings */
for (queue = 0; queue < rxq_number; queue++) {
@@ -2680,12 +2844,6 @@ static int mvneta_init(struct mvneta_port *pp, int phy_addr)
return 0;
}
-static void mvneta_deinit(struct mvneta_port *pp)
-{
- kfree(pp->txqs);
- kfree(pp->rxqs);
-}
-
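
Converting the queue arrays to devm_kcalloc() in the hunks above ties their lifetime to the underlying struct device, which is why mvneta_deinit() and the rxqs error-path kfree() can simply be deleted. A minimal sketch of the idiom, assuming a probe() with access to the device:

/* freed automatically when the device is unbound; no kfree() needed */
pp->txqs = devm_kcalloc(&pdev->dev, txq_number, sizeof(*pp->txqs),
			GFP_KERNEL);
if (!pp->txqs)
	return -ENOMEM;		/* nothing to unwind */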
/* platform glue : initialize decoding windows */
static void mvneta_conf_mbus_windows(struct mvneta_port *pp,
const struct mbus_dram_target_info *dram)
@@ -2768,7 +2926,6 @@ static int mvneta_probe(struct platform_device *pdev)
struct resource *res;
struct device_node *dn = pdev->dev.of_node;
struct device_node *phy_node;
- u32 phy_addr;
struct mvneta_port *pp;
struct net_device *dev;
const char *dt_mac_addr;
@@ -2797,9 +2954,22 @@ static int mvneta_probe(struct platform_device *pdev)
phy_node = of_parse_phandle(dn, "phy", 0);
if (!phy_node) {
- dev_err(&pdev->dev, "no associated PHY\n");
- err = -ENODEV;
- goto err_free_irq;
+ if (!of_phy_is_fixed_link(dn)) {
+ dev_err(&pdev->dev, "no PHY specified\n");
+ err = -ENODEV;
+ goto err_free_irq;
+ }
+
+ err = of_phy_register_fixed_link(dn);
+ if (err < 0) {
+ dev_err(&pdev->dev, "cannot register fixed PHY\n");
+ goto err_free_irq;
+ }
+
+ /* In the case of a fixed PHY, the DT node associated
+ * with the PHY is the Ethernet MAC DT node.
+ */
+ phy_node = dn;
}
phy_mode = of_get_phy_mode(dn);
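
The probe change above lets mvneta come up without an MDIO-managed PHY: when the DT has no "phy" phandle, of_phy_is_fixed_link() checks for a fixed-link description on the MAC node and of_phy_register_fixed_link() instantiates the software PHY for it, after which the MAC node itself is passed wherever a PHY node is expected. A hedged sketch of the same fragment for a generic MAC driver:

/* fragment of a hypothetical probe(); dn is the MAC's device_node */
phy_node = of_parse_phandle(dn, "phy", 0);
if (!phy_node) {
	if (!of_phy_is_fixed_link(dn))
		return -ENODEV;		/* no PHY described at all */

	err = of_phy_register_fixed_link(dn);
	if (err < 0)
		return err;

	/* for a fixed link, the MAC node stands in for the PHY node */
	phy_node = dn;
}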
@@ -2813,11 +2983,9 @@ static int mvneta_probe(struct platform_device *pdev)
dev->watchdog_timeo = 5 * HZ;
dev->netdev_ops = &mvneta_netdev_ops;
- SET_ETHTOOL_OPS(dev, &mvneta_eth_tool_ops);
+ dev->ethtool_ops = &mvneta_eth_tool_ops;
pp = netdev_priv(dev);
-
- pp->weight = MVNETA_RX_POLL_WEIGHT;
pp->phy_node = phy_node;
pp->phy_interface = phy_mode;
@@ -2864,33 +3032,32 @@ static int mvneta_probe(struct platform_device *pdev)
pp->dev = dev;
SET_NETDEV_DEV(dev, &pdev->dev);
- err = mvneta_init(pp, phy_addr);
- if (err < 0) {
- dev_err(&pdev->dev, "can't init eth hal\n");
+ err = mvneta_init(&pdev->dev, pp);
+ if (err < 0)
goto err_free_stats;
- }
err = mvneta_port_power_up(pp, phy_mode);
if (err < 0) {
dev_err(&pdev->dev, "can't power up port\n");
- goto err_deinit;
+ goto err_free_stats;
}
dram_target_info = mv_mbus_dram_info();
if (dram_target_info)
mvneta_conf_mbus_windows(pp, dram_target_info);
- netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
+ netif_napi_add(dev, &pp->napi, mvneta_poll, NAPI_POLL_WEIGHT);
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
+ dev->hw_features |= dev->features;
+ dev->vlan_features |= dev->features;
dev->priv_flags |= IFF_UNICAST_FLT;
+ dev->gso_max_segs = MVNETA_MAX_TSO_SEGS;
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register\n");
- goto err_deinit;
+ goto err_free_stats;
}
netdev_info(dev, "Using %s mac address %pM\n", mac_from,
@@ -2900,8 +3067,6 @@ static int mvneta_probe(struct platform_device *pdev)
return 0;
-err_deinit:
- mvneta_deinit(pp);
err_free_stats:
free_percpu(pp->stats);
err_clk:
@@ -2920,7 +3085,6 @@ static int mvneta_remove(struct platform_device *pdev)
struct mvneta_port *pp = netdev_priv(dev);
unregister_netdev(dev);
- mvneta_deinit(pp);
clk_disable_unprepare(pp->clk);
free_percpu(pp->stats);
irq_dispose_mapping(dev->irq);
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index b358c2f..8f5aa7c 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1488,7 +1488,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
dev->netdev_ops = &pxa168_eth_netdev_ops;
dev->watchdog_timeo = 2 * HZ;
dev->base_addr = 0;
- SET_ETHTOOL_OPS(dev, &pxa168_ethtool_ops);
+ dev->ethtool_ops = &pxa168_ethtool_ops;
INIT_WORK(&pep->tx_timeout_task, pxa168_eth_tx_timeout_task);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index b811064..6969338 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4760,7 +4760,7 @@ static struct net_device *sky2_init_netdev(struct sky2_hw *hw, unsigned port,
SET_NETDEV_DEV(dev, &hw->pdev->dev);
dev->irq = hw->pdev->irq;
- SET_ETHTOOL_OPS(dev, &sky2_ethtool_ops);
+ dev->ethtool_ops = &sky2_ethtool_ops;
dev->watchdog_timeo = TX_WATCHDOG;
dev->netdev_ops = &sky2_netdev_ops[port];
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 29b6169..5d940a2 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -212,8 +212,7 @@ static int mlx4_comm_cmd_poll(struct mlx4_dev *dev, u8 cmd, u16 param,
/* First, verify that the master reports correct status */
if (comm_pending(dev)) {
- mlx4_warn(dev, "Communication channel is not idle."
- "my toggle is %d (cmd:0x%x)\n",
+ mlx4_warn(dev, "Communication channel is not idle - my toggle is %d (cmd:0x%x)\n",
priv->cmd.comm_toggle, cmd);
return -EAGAIN;
}
@@ -422,9 +421,8 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
*out_param =
be64_to_cpu(vhcr->out_param);
else {
- mlx4_err(dev, "response expected while"
- "output mailbox is NULL for "
- "command 0x%x\n", op);
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
vhcr->status = CMD_STAT_BAD_PARAM;
}
}
@@ -439,16 +437,15 @@ static int mlx4_slave_cmd(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
*out_param =
be64_to_cpu(vhcr->out_param);
else {
- mlx4_err(dev, "response expected while"
- "output mailbox is NULL for "
- "command 0x%x\n", op);
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
vhcr->status = CMD_STAT_BAD_PARAM;
}
}
ret = mlx4_status_to_errno(vhcr->status);
} else
- mlx4_err(dev, "failed execution of VHCR_POST command"
- "opcode 0x%x\n", op);
+ mlx4_err(dev, "failed execution of VHCR_POST command opcode 0x%x\n",
+ op);
}
mutex_unlock(&priv->cmd.slave_cmd_mutex);
@@ -476,6 +473,13 @@ static int mlx4_cmd_poll(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
goto out;
}
+ if (out_is_imm && !out_param) {
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
+ err = -EINVAL;
+ goto out;
+ }
+
err = mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
in_modifier, op_modifier, op, CMD_POLL_TOKEN, 0);
if (err)
@@ -554,6 +558,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
cmd->free_head = context->next;
spin_unlock(&cmd->context_lock);
+ if (out_is_imm && !out_param) {
+ mlx4_err(dev, "response expected while output mailbox is NULL for command 0x%x\n",
+ op);
+ err = -EINVAL;
+ goto out;
+ }
+
init_completion(&context->done);
mlx4_cmd_post(dev, in_param, out_param ? *out_param : 0,
@@ -625,9 +636,8 @@ static int mlx4_ACCESS_MEM(struct mlx4_dev *dev, u64 master_addr,
if ((slave_addr & 0xfff) | (master_addr & 0xfff) |
(slave & ~0x7f) | (size & 0xff)) {
- mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx "
- "master_addr:0x%llx slave_id:%d size:%d\n",
- slave_addr, master_addr, slave, size);
+ mlx4_err(dev, "Bad access mem params - slave_addr:0x%llx master_addr:0x%llx slave_id:%d size:%d\n",
+ slave_addr, master_addr, slave, size);
return -EINVAL;
}
@@ -1422,8 +1432,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
ALIGN(sizeof(struct mlx4_vhcr_cmd),
MLX4_ACCESS_MEM_ALIGN), 1);
if (ret) {
- mlx4_err(dev, "%s:Failed reading vhcr"
- "ret: 0x%x\n", __func__, ret);
+ mlx4_err(dev, "%s: Failed reading vhcr ret: 0x%x\n",
+ __func__, ret);
kfree(vhcr);
return ret;
}
@@ -1474,9 +1484,8 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
/* Apply permission and bound checks if applicable */
if (cmd->verify && cmd->verify(dev, slave, vhcr, inbox)) {
- mlx4_warn(dev, "Command:0x%x from slave: %d failed protection "
- "checks for resource_id:%d\n", vhcr->op, slave,
- vhcr->in_modifier);
+ mlx4_warn(dev, "Command:0x%x from slave: %d failed protection checks for resource_id:%d\n",
+ vhcr->op, slave, vhcr->in_modifier);
vhcr_cmd->status = CMD_STAT_BAD_OP;
goto out_status;
}
@@ -1515,8 +1524,7 @@ static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave,
}
if (err) {
- mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with"
- " error:%d, status %d\n",
+ mlx4_warn(dev, "vhcr command:0x%x slave:%d failed with error:%d, status %d\n",
vhcr->op, slave, vhcr->errno, err);
vhcr_cmd->status = mlx4_errno_to_status(err);
goto out_status;
@@ -1550,8 +1558,8 @@ out_status:
__func__);
else if (vhcr->e_bit &&
mlx4_GEN_EQE(dev, slave, &priv->mfunc.master.cmd_eqe))
- mlx4_warn(dev, "Failed to generate command completion "
- "eqe for slave %d\n", slave);
+ mlx4_warn(dev, "Failed to generate command completion eqe for slave %d\n",
+ slave);
}
out:
@@ -1590,8 +1598,9 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
mlx4_dbg(dev, "updating immediately admin params slave %d port %d\n",
slave, port);
- mlx4_dbg(dev, "vlan %d QoS %d link down %d\n", vp_admin->default_vlan,
- vp_admin->default_qos, vp_admin->link_state);
+ mlx4_dbg(dev, "vlan %d QoS %d link down %d\n",
+ vp_admin->default_vlan, vp_admin->default_qos,
+ vp_admin->link_state);
work = kzalloc(sizeof(*work), GFP_KERNEL);
if (!work)
@@ -1604,7 +1613,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
&admin_vlan_ix);
if (err) {
kfree(work);
- mlx4_warn((&priv->dev),
+ mlx4_warn(&priv->dev,
"No vlan resources slave %d, port %d\n",
slave, port);
return err;
@@ -1613,7 +1622,7 @@ static int mlx4_master_immediate_activate_vlan_qos(struct mlx4_priv *priv,
admin_vlan_ix = NO_INDX;
}
work->flags |= MLX4_VF_IMMED_VLAN_FLAG_VLAN;
- mlx4_dbg((&(priv->dev)),
+ mlx4_dbg(&priv->dev,
"alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_admin->default_vlan),
admin_vlan_ix, slave, port);
@@ -1676,12 +1685,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
vp_admin->default_vlan, &(vp_oper->vlan_idx));
if (err) {
vp_oper->vlan_idx = NO_INDX;
- mlx4_warn((&priv->dev),
+ mlx4_warn(&priv->dev,
"No vlan resorces slave %d, port %d\n",
slave, port);
return err;
}
- mlx4_dbg((&(priv->dev)), "alloc vlan %d idx %d slave %d port %d\n",
+ mlx4_dbg(&priv->dev, "alloc vlan %d idx %d slave %d port %d\n",
(int)(vp_oper->state.default_vlan),
vp_oper->vlan_idx, slave, port);
}
@@ -1692,12 +1701,12 @@ static int mlx4_master_activate_admin_state(struct mlx4_priv *priv, int slave)
if (0 > vp_oper->mac_idx) {
err = vp_oper->mac_idx;
vp_oper->mac_idx = NO_INDX;
- mlx4_warn((&priv->dev),
+ mlx4_warn(&priv->dev,
"No mac resorces slave %d, port %d\n",
slave, port);
return err;
}
- mlx4_dbg((&(priv->dev)), "alloc mac %llx idx %d slave %d port %d\n",
+ mlx4_dbg(&priv->dev, "alloc mac %llx idx %d slave %d port %d\n",
vp_oper->state.mac, vp_oper->mac_idx, slave, port);
}
}
@@ -1748,8 +1757,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
slave_state[slave].comm_toggle ^= 1;
reply = (u32) slave_state[slave].comm_toggle << 31;
if (toggle != slave_state[slave].comm_toggle) {
- mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER"
- "STATE COMPROMISIED ***\n", toggle, slave);
+ mlx4_warn(dev, "Incorrect toggle %d from slave %d. *** MASTER STATE COMPROMISED ***\n",
+ toggle, slave);
goto reset_slave;
}
if (cmd == MLX4_COMM_CMD_RESET) {
@@ -1776,8 +1785,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
/*command from slave in the middle of FLR*/
if (cmd != MLX4_COMM_CMD_RESET &&
MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
- mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) "
- "in the middle of FLR\n", slave, cmd);
+ mlx4_warn(dev, "slave:%d is Trying to run cmd(0x%x) in the middle of FLR\n",
+ slave, cmd);
return;
}
@@ -1815,8 +1824,8 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_master_process_vhcr(dev, slave, NULL)) {
- mlx4_err(dev, "Failed processing vhcr for slave:%d,"
- " resetting slave.\n", slave);
+ mlx4_err(dev, "Failed processing vhcr for slave:%d, resetting slave\n",
+ slave);
mutex_unlock(&priv->cmd.slave_cmd_mutex);
goto reset_slave;
}
@@ -1833,8 +1842,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
is_going_down = 1;
spin_unlock_irqrestore(&priv->mfunc.master.slave_state_lock, flags);
if (is_going_down) {
- mlx4_warn(dev, "Slave is going down aborting command(%d)"
- " executing from slave:%d\n",
+ mlx4_warn(dev, "Slave is going down aborting command(%d) executing from slave:%d\n",
cmd, slave);
return;
}
@@ -1897,10 +1905,9 @@ void mlx4_master_comm_channel(struct work_struct *work)
if (toggle != slt) {
if (master->slave_state[slave].comm_toggle
!= slt) {
- printk(KERN_INFO "slave %d out of sync."
- " read toggle %d, state toggle %d. "
- "Resynching.\n", slave, slt,
- master->slave_state[slave].comm_toggle);
+ pr_info("slave %d out of sync. read toggle %d, state toggle %d. Resynching.\n",
+ slave, slt,
+ master->slave_state[slave].comm_toggle);
master->slave_state[slave].comm_toggle =
slt;
}
@@ -1913,8 +1920,7 @@ void mlx4_master_comm_channel(struct work_struct *work)
}
if (reported && reported != served)
- mlx4_warn(dev, "Got command event with bitmask from %d slaves"
- " but %d were served\n",
+ mlx4_warn(dev, "Got command event with bitmask from %d slaves but %d were served\n",
reported, served);
if (mlx4_ARM_COMM_CHANNEL(dev))
@@ -1970,7 +1976,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
ioremap(pci_resource_start(dev->pdev, 2) +
MLX4_SLAVE_COMM_BASE, MLX4_COMM_PAGESIZE);
if (!priv->mfunc.comm) {
- mlx4_err(dev, "Couldn't map communication vector.\n");
+ mlx4_err(dev, "Couldn't map communication vector\n");
goto err_vhcr;
}
@@ -2097,7 +2103,7 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->cmd.hcr = ioremap(pci_resource_start(dev->pdev, 0) +
MLX4_HCR_BASE, MLX4_HCR_SIZE);
if (!priv->cmd.hcr) {
- mlx4_err(dev, "Couldn't map command register.\n");
+ mlx4_err(dev, "Couldn't map command register\n");
return -ENOMEM;
}
}
@@ -2498,11 +2504,12 @@ int mlx4_get_vf_config(struct mlx4_dev *dev, int port, int vf, struct ifla_vf_in
ivf->mac[4] = ((s_info->mac >> (1*8)) & 0xff);
ivf->mac[5] = ((s_info->mac) & 0xff);
- ivf->vlan = s_info->default_vlan;
- ivf->qos = s_info->default_qos;
- ivf->tx_rate = s_info->tx_rate;
- ivf->spoofchk = s_info->spoofchk;
- ivf->linkstate = s_info->link_state;
+ ivf->vlan = s_info->default_vlan;
+ ivf->qos = s_info->default_qos;
+ ivf->max_tx_rate = s_info->tx_rate;
+ ivf->min_tx_rate = 0;
+ ivf->spoofchk = s_info->spoofchk;
+ ivf->linkstate = s_info->link_state;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index c90cde5..80f7252 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -293,6 +293,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
atomic_set(&cq->refcount, 1);
init_completion(&cq->free);
+ cq->irq = priv->eq_table.eq[cq->vector].irq;
+ cq->irq_affinity_change = false;
+
return 0;
err_radix:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index c2cd8d3..4b21307 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -125,8 +125,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
&cq->vector)) {
cq->vector = (cq->ring + 1 + priv->port)
% mdev->dev->caps.num_comp_vectors;
- mlx4_warn(mdev, "Failed Assigning an EQ to "
- "%s ,Falling back to legacy EQ's\n",
+ mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
name);
}
}
@@ -164,6 +163,13 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
NAPI_POLL_WEIGHT);
} else {
+ struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
+
+ err = irq_set_affinity_hint(cq->mcq.irq,
+ ring->affinity_mask);
+ if (err)
+ mlx4_warn(mdev, "Failed setting affinity hint\n");
+
netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
napi_hash_add(&cq->napi);
}
@@ -180,8 +186,11 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
mlx4_en_unmap_buffer(&cq->wqres.buf);
mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
- if (priv->mdev->dev->caps.comp_pool && cq->vector)
+ if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+ if (!cq->is_tx)
+ irq_set_affinity_hint(cq->mcq.irq, NULL);
mlx4_release_eq(priv->mdev->dev, cq->vector);
+ }
cq->vector = 0;
cq->buf_size = 0;
cq->buf = NULL;
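
irq_set_affinity_hint() stores the mask pointer in the IRQ descriptor and publishes it via /proc/irq/<n>/affinity_hint for irqbalance; the kernel keeps that pointer, so a hint set at CQ activation must be cleared with a NULL hint before the cpumask is freed, which is what the destroy path above now does for RX vectors. A minimal sketch of the pairing (struct ex_ring and its fields are hypothetical):

struct ex_ring {
	int irq;
	cpumask_var_t affinity_mask;
};

static void ex_ring_activate(struct ex_ring *ring)
{
	/* non-fatal: the ring still works without the hint */
	if (irq_set_affinity_hint(ring->irq, ring->affinity_mask))
		pr_warn("failed to set affinity hint for irq %d\n", ring->irq);
}

static void ex_ring_destroy(struct ex_ring *ring)
{
	irq_set_affinity_hint(ring->irq, NULL);	/* drop the stored pointer */
	free_cpumask_var(ring->affinity_mask);
}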
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3e8d336..fa1a069 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -378,8 +378,8 @@ static int mlx4_en_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, priv->port_state.link_speed);
cmd->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(cmd, -1);
- cmd->duplex = -1;
+ ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->duplex = DUPLEX_UNKNOWN;
}
if (trans_type > 0 && trans_type <= 0xC) {
@@ -564,7 +564,7 @@ static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
return priv->rx_ring_num;
}
-static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
+static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_rss_map *rss_map = &priv->rss_map;
@@ -582,8 +582,8 @@ static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
return err;
}
-static int mlx4_en_set_rxfh_indir(struct net_device *dev,
- const u32 *ring_index)
+static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
+ const u8 *key)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
@@ -925,13 +925,13 @@ static int mlx4_en_flow_replace(struct net_device *dev,
qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1);
} else {
if (cmd->fs.ring_cookie >= priv->rx_ring_num) {
- en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n",
+ en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist\n",
cmd->fs.ring_cookie);
return -EINVAL;
}
qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn;
if (!qpn) {
- en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n",
+ en_warn(priv, "rxnfc: RX ring (%llu) is inactive\n",
cmd->fs.ring_cookie);
return -EINVAL;
}
@@ -956,7 +956,7 @@ static int mlx4_en_flow_replace(struct net_device *dev,
}
err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id);
if (err) {
- en_err(priv, "Fail to attach network rule at location %d.\n",
+ en_err(priv, "Fail to attach network rule at location %d\n",
cmd->fs.location);
goto out_free_list;
}
@@ -1121,7 +1121,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = priv->mdev;
- int port_up;
+ int port_up = 0;
int err = 0;
if (channel->other_count || channel->combined_count ||
@@ -1151,7 +1151,8 @@ static int mlx4_en_set_channels(struct net_device *dev,
netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
- mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
+ if (dev->num_tc)
+ mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num);
en_warn(priv, "Using %d RX rings\n", priv->rx_ring_num);
@@ -1223,8 +1224,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
.get_rxnfc = mlx4_en_get_rxnfc,
.set_rxnfc = mlx4_en_set_rxnfc,
.get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
- .get_rxfh_indir = mlx4_en_get_rxfh_indir,
- .set_rxfh_indir = mlx4_en_set_rxfh_indir,
+ .get_rxfh = mlx4_en_get_rxfh,
+ .set_rxfh = mlx4_en_set_rxfh,
.get_channels = mlx4_en_get_channels,
.set_channels = mlx4_en_set_channels,
.get_ts_info = mlx4_en_get_ts_info,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 0c59d4f..f953c1d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -133,7 +133,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
MLX4_EN_MAX_TX_RING_P_UP);
if (params->udp_rss && !(mdev->dev->caps.flags
& MLX4_DEV_CAP_FLAG_UDP_RSS)) {
- mlx4_warn(mdev, "UDP RSS is not supported on this device.\n");
+ mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
params->udp_rss = 0;
}
for (i = 1; i <= MLX4_MAX_PORTS; i++) {
@@ -251,8 +251,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
mdev->LSO_support = !!(dev->caps.flags & (1 << 15));
if (!mdev->LSO_support)
- mlx4_warn(mdev, "LSO not supported, please upgrade to later "
- "FW version to enable LSO\n");
+ mlx4_warn(mdev, "LSO not supported, please upgrade to later FW version to enable LSO\n");
if (mlx4_mr_alloc(mdev->dev, mdev->priv_pdn, 0, ~0ull,
MLX4_PERM_LOCAL_WRITE | MLX4_PERM_LOCAL_READ,
@@ -268,7 +267,7 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
/* Build device profile according to supplied module parameters */
err = mlx4_en_get_profile(mdev);
if (err) {
- mlx4_err(mdev, "Bad module parameters, aborting.\n");
+ mlx4_err(mdev, "Bad module parameters, aborting\n");
goto err_mr;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7e4b172..7d4fb7b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -130,7 +130,7 @@ static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
case IPPROTO_TCP:
return MLX4_NET_TRANS_RULE_ID_TCP;
default:
- return -EPROTONOSUPPORT;
+ return MLX4_NET_TRANS_RULE_NUM;
}
};
@@ -177,7 +177,7 @@ static void mlx4_en_filter_work(struct work_struct *work)
int rc;
__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);
- if (spec_tcp_udp.id < 0) {
+ if (spec_tcp_udp.id >= MLX4_NET_TRANS_RULE_NUM) {
en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
filter->ip_proto);
goto ignore;
@@ -770,11 +770,12 @@ static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
priv->dev->dev_addr, priv->prev_mac);
if (err)
en_err(priv, "Failed changing HW MAC address\n");
- memcpy(priv->prev_mac, priv->dev->dev_addr,
- sizeof(priv->prev_mac));
} else
en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
+ memcpy(priv->prev_mac, priv->dev->dev_addr,
+ sizeof(priv->prev_mac));
+
return err;
}
@@ -788,9 +789,8 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
if (!is_valid_ether_addr(saddr->sa_data))
return -EADDRNOTAVAIL;
- memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
-
mutex_lock(&mdev->state_lock);
+ memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
err = mlx4_en_do_set_mac(priv);
mutex_unlock(&mdev->state_lock);
@@ -1526,6 +1526,27 @@ static void mlx4_en_linkstate(struct work_struct *work)
mutex_unlock(&mdev->state_lock);
}
+static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+{
+ struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
+ int numa_node = priv->mdev->dev->numa_node;
+ int ret = 0;
+
+ if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
+ return -ENOMEM;
+
+ ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
+ ring->affinity_mask);
+ if (ret)
+ free_cpumask_var(ring->affinity_mask);
+
+ return ret;
+}
+
+static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+{
+ free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
+}
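
mlx4_en_init_affinity_hint() above spreads RX rings over CPUs with cpumask_set_cpu_local_first(), which enumerates the CPUs of the device's NUMA node before any remote ones, so low-numbered rings land on local cores first. A hedged sketch of the allocate/select/free discipline around it:

/* sketch only; ring_idx and numa_node come from the caller */
cpumask_var_t mask;
int ret;

if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
	return -ENOMEM;

/* pick the ring_idx-th CPU, counting NUMA-local CPUs first */
ret = cpumask_set_cpu_local_first(ring_idx, numa_node, mask);
if (ret)
	free_cpumask_var(mask);	/* don't leak the mask on failure */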
int mlx4_en_start_port(struct net_device *dev)
{
@@ -1567,17 +1588,25 @@ int mlx4_en_start_port(struct net_device *dev)
mlx4_en_cq_init_lock(cq);
+ err = mlx4_en_init_affinity_hint(priv, i);
+ if (err) {
+ en_err(priv, "Failed preparing IRQ affinity hint\n");
+ goto cq_err;
+ }
+
err = mlx4_en_activate_cq(priv, cq, i);
if (err) {
en_err(priv, "Failed activating Rx CQ\n");
+ mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
for (j = 0; j < cq->size; j++)
cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
- en_err(priv, "Failed setting cq moderation parameters");
+ en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq);
+ mlx4_en_free_affinity_hint(priv, i);
goto cq_err;
}
mlx4_en_arm_cq(priv, cq);
@@ -1615,7 +1644,7 @@ int mlx4_en_start_port(struct net_device *dev)
}
err = mlx4_en_set_cq_moder(priv, cq);
if (err) {
- en_err(priv, "Failed setting cq moderation parameters");
+ en_err(priv, "Failed setting cq moderation parameters\n");
mlx4_en_deactivate_cq(priv, cq);
goto tx_err;
}
@@ -1715,8 +1744,10 @@ rss_err:
mac_err:
mlx4_en_put_qp(priv);
cq_err:
- while (rx_index--)
+ while (rx_index--) {
mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
+ mlx4_en_free_affinity_hint(priv, rx_index);
+ }
for (i = 0; i < priv->rx_ring_num; i++)
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
@@ -1847,6 +1878,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
msleep(1);
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
mlx4_en_deactivate_cq(priv, cq);
+
+ mlx4_en_free_affinity_hint(priv, i);
}
}
@@ -2539,7 +2572,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_set_real_num_tx_queues(dev, priv->tx_ring_num);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
- SET_ETHTOOL_OPS(dev, &mlx4_en_ethtool_ops);
+ dev->ethtool_ops = &mlx4_en_ethtool_ops;
/*
* Set driver features
@@ -2594,8 +2627,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
prof->tx_pause, prof->tx_ppp,
prof->rx_pause, prof->rx_ppp);
if (err) {
- en_err(priv, "Failed setting port general configurations "
- "for port %d, with error %d\n", priv->port, err);
+ en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
+ priv->port, err);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 87857a6..d2d4157 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -270,13 +270,11 @@ static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv)
ring->actual_size,
GFP_KERNEL)) {
if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) {
- en_err(priv, "Failed to allocate "
- "enough rx buffers\n");
+ en_err(priv, "Failed to allocate enough rx buffers\n");
return -ENOMEM;
} else {
new_size = rounddown_pow_of_two(ring->actual_size);
- en_warn(priv, "Only %d buffers allocated "
- "reducing ring size to %d",
+ en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n",
ring->actual_size, new_size);
goto reduce_rings;
}
@@ -685,10 +683,9 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
/* Drop packet on bad receive or bad checksum */
if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) ==
MLX4_CQE_OPCODE_ERROR)) {
- en_err(priv, "CQE completed in error - vendor "
- "syndrom:%d syndrom:%d\n",
- ((struct mlx4_err_cqe *) cqe)->vendor_err_syndrome,
- ((struct mlx4_err_cqe *) cqe)->syndrome);
+ en_err(priv, "CQE completed in error - vendor syndrom:%d syndrom:%d\n",
+ ((struct mlx4_err_cqe *)cqe)->vendor_err_syndrome,
+ ((struct mlx4_err_cqe *)cqe)->syndrome);
goto next;
}
if (unlikely(cqe->badfcs_enc & MLX4_CQE_BAD_FCS)) {
@@ -898,10 +895,17 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
mlx4_en_cq_unlock_napi(cq);
/* If we used up all the quota - we're probably not done yet... */
- if (done == budget)
+ if (done == budget) {
INC_PERF_COUNTER(priv->pstats.napi_quota);
- else {
+ if (unlikely(cq->mcq.irq_affinity_change)) {
+ cq->mcq.irq_affinity_change = false;
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+ return 0;
+ }
+ } else {
/* Done for now */
+ cq->mcq.irq_affinity_change = false;
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
}
@@ -944,8 +948,8 @@ void mlx4_en_calc_rx_buf(struct net_device *dev)
priv->rx_skb_size = eff_mtu;
priv->log_rx_info = ROUNDUP_LOG2(i * sizeof(struct mlx4_en_rx_alloc));
- en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d "
- "num_frags:%d):\n", eff_mtu, priv->num_frags);
+ en_dbg(DRV, priv, "Rx buffer scatter-list (effective-mtu:%d num_frags:%d):\n",
+ eff_mtu, priv->num_frags);
for (i = 0; i < priv->num_frags; i++) {
en_err(priv,
" frag:%d - size:%d prefix:%d align:%d stride:%d\n",
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index bc0cc1e..8be7483 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -108,9 +108,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->buf = ring->wqres.buf.direct.buf;
- en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d "
- "buf_size:%d dma:%llx\n", ring, ring->buf, ring->size,
- ring->buf_size, (unsigned long long) ring->wqres.buf.direct.map);
+ en_dbg(DRV, priv, "Allocated TX ring (addr:%p) - buf:%p size:%d buf_size:%d dma:%llx\n",
+ ring, ring->buf, ring->size, ring->buf_size,
+ (unsigned long long) ring->wqres.buf.direct.map);
ring->qpn = qpn;
err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->qp, GFP_KERNEL);
@@ -122,7 +122,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_bf_alloc(mdev->dev, &ring->bf, node);
if (err) {
- en_dbg(DRV, priv, "working without blueflame (%d)", err);
+ en_dbg(DRV, priv, "working without blueflame (%d)\n", err);
ring->bf.uar = &mdev->priv_uar;
ring->bf.uar->map = mdev->uar_map;
ring->bf_enabled = false;
@@ -474,9 +474,15 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
/* If we used up all the quota - we're probably not done yet... */
if (done < budget) {
/* Done for now */
+ cq->mcq.irq_affinity_change = false;
napi_complete(napi);
mlx4_en_arm_cq(priv, cq);
return done;
+ } else if (unlikely(cq->mcq.irq_affinity_change)) {
+ cq->mcq.irq_affinity_change = false;
+ napi_complete(napi);
+ mlx4_en_arm_cq(priv, cq);
+ return 0;
}
return budget;
}
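
Both poll handlers above now consult cq->mcq.irq_affinity_change, which the affinity notifier added in eq.c below raises when the interrupt migrates to another CPU: instead of returning the full budget and keeping NAPI pinned to the old CPU, the poller completes and re-arms the CQ so the next interrupt, and therefore further polling, runs on the new CPU. A hedged sketch of the idea in a generic poll routine (struct ex_cq and the ex_* helpers are hypothetical):

struct ex_cq {
	struct napi_struct napi;
	bool irq_affinity_change;	/* set by the affinity notifier */
};

static int ex_poll(struct napi_struct *napi, int budget)
{
	struct ex_cq *cq = container_of(napi, struct ex_cq, napi);
	int done = ex_process_cq(cq, budget);	/* hypothetical */

	if (done < budget || cq->irq_affinity_change) {
		/* finished, or the IRQ moved: stop polling here and let
		 * the re-armed interrupt resume on the new CPU
		 */
		cq->irq_affinity_change = false;
		napi_complete(napi);
		ex_arm_cq(cq);			/* hypothetical */
		return done < budget ? done : 0;
	}
	return budget;				/* keep polling */
}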
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d501a2b..d954ec1 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,6 +53,11 @@ enum {
MLX4_EQ_ENTRY_SIZE = 0x20
};
+struct mlx4_irq_notify {
+ void *arg;
+ struct irq_affinity_notify notify;
+};
+
#define MLX4_EQ_STATUS_OK ( 0 << 28)
#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -152,14 +157,13 @@ void mlx4_gen_slave_eqe(struct work_struct *work)
if (i != dev->caps.function &&
master->slave_state[i].active)
if (mlx4_GEN_EQE(dev, i, eqe))
- mlx4_warn(dev, "Failed to "
- " generate event "
- "for slave %d\n", i);
+ mlx4_warn(dev, "Failed to generate event for slave %d\n",
+ i);
}
} else {
if (mlx4_GEN_EQE(dev, slave, eqe))
- mlx4_warn(dev, "Failed to generate event "
- "for slave %d\n", slave);
+ mlx4_warn(dev, "Failed to generate event for slave %d\n",
+ slave);
}
++slave_eq->cons;
}
@@ -177,8 +181,8 @@ static void slave_event(struct mlx4_dev *dev, u8 slave, struct mlx4_eqe *eqe)
s_eqe = &slave_eq->event_eqe[slave_eq->prod & (SLAVE_EVENT_EQ_SIZE - 1)];
if ((!!(s_eqe->owner & 0x80)) ^
(!!(slave_eq->prod & SLAVE_EVENT_EQ_SIZE))) {
- mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. "
- "No free EQE on slave events queue\n", slave);
+ mlx4_warn(dev, "Master failed to generate an EQE for slave: %d. No free EQE on slave events queue\n",
+ slave);
spin_unlock_irqrestore(&slave_eq->event_lock, flags);
return;
}
@@ -375,9 +379,9 @@ int set_and_calc_slave_port_state(struct mlx4_dev *dev, int slave,
}
break;
default:
- pr_err("%s: BUG!!! UNKNOWN state: "
- "slave:%d, port:%d\n", __func__, slave, port);
- goto out;
+ pr_err("%s: BUG!!! UNKNOWN state: slave:%d, port:%d\n",
+ __func__, slave, port);
+ goto out;
}
ret = mlx4_get_slave_port_state(dev, slave, port);
@@ -425,8 +429,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
for (i = 0 ; i < dev->num_slaves; i++) {
if (MLX4_COMM_CMD_FLR == slave_state[i].last_cmd) {
- mlx4_dbg(dev, "mlx4_handle_slave_flr: "
- "clean slave: %d\n", i);
+ mlx4_dbg(dev, "mlx4_handle_slave_flr: clean slave: %d\n",
+ i);
mlx4_delete_all_resources_for_slave(dev, i);
/*return the slave to running mode*/
@@ -438,8 +442,8 @@ void mlx4_master_handle_slave_flr(struct work_struct *work)
err = mlx4_cmd(dev, 0, i, 0, MLX4_CMD_INFORM_FLR_DONE,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err)
- mlx4_warn(dev, "Failed to notify FW on "
- "FLR done (slave:%d)\n", i);
+ mlx4_warn(dev, "Failed to notify FW on FLR done (slave:%d)\n",
+ i);
}
}
}
@@ -490,9 +494,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
be32_to_cpu(eqe->event.qp.qpn)
& 0xffffff, &slave);
if (ret && ret != -ENOENT) {
- mlx4_dbg(dev, "QP event %02x(%02x) on "
- "EQ %d at index %u: could "
- "not get slave id (%d)\n",
+ mlx4_dbg(dev, "QP event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret);
break;
@@ -520,23 +522,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
& 0xffffff,
&slave);
if (ret && ret != -ENOENT) {
- mlx4_warn(dev, "SRQ event %02x(%02x) "
- "on EQ %d at index %u: could"
- " not get slave id (%d)\n",
+ mlx4_warn(dev, "SRQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
eqe->type, eqe->subtype,
eq->eqn, eq->cons_index, ret);
break;
}
- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x,"
- " event: %02x(%02x)\n", __func__,
- slave,
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+ __func__, slave,
be32_to_cpu(eqe->event.srq.srqn),
eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
- mlx4_warn(dev, "%s: sending event "
- "%02x(%02x) to slave:%d\n",
- __func__, eqe->type,
+ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
@@ -569,8 +567,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH) {
if (i == mlx4_master_func_num(dev))
continue;
- mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN"
- " to slave: %d, port:%d\n",
+ mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
__func__, i, port);
s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state;
if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
@@ -634,11 +631,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
be32_to_cpu(eqe->event.cq_err.cqn)
& 0xffffff, &slave);
if (ret && ret != -ENOENT) {
- mlx4_dbg(dev, "CQ event %02x(%02x) on "
- "EQ %d at index %u: could "
- "not get slave id (%d)\n",
- eqe->type, eqe->subtype,
- eq->eqn, eq->cons_index, ret);
+ mlx4_dbg(dev, "CQ event %02x(%02x) on EQ %d at index %u: could not get slave id (%d)\n",
+ eqe->type, eqe->subtype,
+ eq->eqn, eq->cons_index, ret);
break;
}
@@ -667,8 +662,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_COMM_CHANNEL:
if (!mlx4_is_master(dev)) {
- mlx4_warn(dev, "Received comm channel event "
- "for non master device\n");
+ mlx4_warn(dev, "Received comm channel event for non master device\n");
break;
}
memcpy(&priv->mfunc.master.comm_arm_bit_vector,
@@ -681,8 +675,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_FLR_EVENT:
flr_slave = be32_to_cpu(eqe->event.flr_event.slave_id);
if (!mlx4_is_master(dev)) {
- mlx4_warn(dev, "Non-master function received"
- "FLR event\n");
+ mlx4_warn(dev, "Non-master function received FLR event\n");
break;
}
@@ -711,22 +704,17 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
if (mlx4_is_master(dev))
for (i = 0; i < dev->num_slaves; i++) {
- mlx4_dbg(dev, "%s: Sending "
- "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
- " to slave: %d\n", __func__, i);
+ mlx4_dbg(dev, "%s: Sending MLX4_FATAL_WARNING_SUBTYPE_WARMING to slave: %d\n",
+ __func__, i);
if (i == dev->caps.function)
continue;
mlx4_slave_event(dev, i, eqe);
}
- mlx4_err(dev, "Temperature Threshold was reached! "
- "Threshold: %d celsius degrees; "
- "Current Temperature: %d\n",
- be16_to_cpu(eqe->event.warming.warning_threshold),
- be16_to_cpu(eqe->event.warming.current_temperature));
+ mlx4_err(dev, "Temperature Threshold was reached! Threshold: %d celsius degrees; Current Temperature: %d\n",
+ be16_to_cpu(eqe->event.warming.warning_threshold),
+ be16_to_cpu(eqe->event.warming.current_temperature));
} else
- mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
- "subtype %02x on EQ %d at index %u. owner=%x, "
- "nent=0x%x, slave=%x, ownership=%s\n",
+ mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), subtype %02x on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent,
eqe->slave_id,
@@ -743,9 +731,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
case MLX4_EVENT_TYPE_ECC_DETECT:
default:
- mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at "
- "index %u. owner=%x, nent=0x%x, slave=%x, "
- "ownership=%s\n",
+ mlx4_warn(dev, "Unhandled event %02x(%02x) on EQ %d at index %u. owner=%x, nent=0x%x, slave=%x, ownership=%s\n",
eqe->type, eqe->subtype, eq->eqn,
eq->cons_index, eqe->owner, eq->nent,
eqe->slave_id,
@@ -1088,7 +1074,7 @@ static int mlx4_map_clr_int(struct mlx4_dev *dev)
priv->clr_base = ioremap(pci_resource_start(dev->pdev, priv->fw.clr_int_bar) +
priv->fw.clr_int_base, MLX4_CLR_INT_SIZE);
if (!priv->clr_base) {
- mlx4_err(dev, "Couldn't map interrupt clear register, aborting.\n");
+ mlx4_err(dev, "Couldn't map interrupt clear register, aborting\n");
return -ENOMEM;
}
@@ -1102,6 +1088,57 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
iounmap(priv->clr_base);
}
+static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
+ const cpumask_t *mask)
+{
+ struct mlx4_irq_notify *n = container_of(notify,
+ struct mlx4_irq_notify,
+ notify);
+ struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
+ struct radix_tree_iter iter;
+ void **slot;
+
+ radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
+ struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
+
+ if (cq->irq == notify->irq)
+ cq->irq_affinity_change = true;
+ }
+}
+
+static void mlx4_release_irq_notifier(struct kref *ref)
+{
+ struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
+ notify.kref);
+ kfree(n);
+}
+
+static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
+ struct mlx4_dev *dev, int irq)
+{
+ struct mlx4_irq_notify *irq_notifier = NULL;
+ int err = 0;
+
+ irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
+ if (!irq_notifier) {
+ mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
+ irq);
+ return;
+ }
+
+ irq_notifier->notify.irq = irq;
+ irq_notifier->notify.notify = mlx4_irq_notifier_notify;
+ irq_notifier->notify.release = mlx4_release_irq_notifier;
+ irq_notifier->arg = priv;
+ err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
+ if (err) {
+ kfree(irq_notifier);
+ irq_notifier = NULL;
+ mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
+ }
+}
+
int mlx4_alloc_eq_table(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1372,6 +1409,9 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
continue;
/* we don't want to break here */
}
+ mlx4_assign_irq_notifier(priv, dev,
+ priv->eq_table.eq[vec].irq);
+
eq_set_ci(&priv->eq_table.eq[vec], 1);
}
}
@@ -1398,6 +1438,9 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
Belonging to a legacy EQ*/
mutex_lock(&priv->msix_ctl.pool_lock);
if (priv->msix_ctl.pool_bm & 1ULL << i) {
+ irq_set_affinity_notifier(
+ priv->eq_table.eq[vec].irq,
+ NULL);
free_irq(priv->eq_table.eq[vec].irq,
&priv->eq_table.eq[vec]);
priv->msix_ctl.pool_bm &= ~(1ULL << i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 01e6dd6..688e1ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -437,8 +437,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u32 gen_or_port,
} else if (dev->caps.port_type[gen_or_port] == MLX4_PORT_TYPE_IB) {
MLX4_GET(field, outbox, QUERY_FUNC_CAP_FLAGS0_OFFSET);
if (field & QUERY_FUNC_CAP_FLAGS0_FORCE_PHY_WQE_GID) {
- mlx4_err(dev, "phy_wqe_gid is "
- "enforced on this ib port\n");
+ mlx4_err(dev, "phy_wqe_gid is enforced on this ib port\n");
err = -EPROTONOSUPPORT;
goto out;
}
@@ -1070,10 +1069,10 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
*/
lg = ffs(mlx4_icm_addr(&iter) | mlx4_icm_size(&iter)) - 1;
if (lg < MLX4_ICM_PAGE_SHIFT) {
- mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx).\n",
- MLX4_ICM_PAGE_SIZE,
- (unsigned long long) mlx4_icm_addr(&iter),
- mlx4_icm_size(&iter));
+ mlx4_warn(dev, "Got FW area not aligned to %d (%llx/%lx)\n",
+ MLX4_ICM_PAGE_SIZE,
+ (unsigned long long) mlx4_icm_addr(&iter),
+ mlx4_icm_size(&iter));
err = -EINVAL;
goto out;
}
@@ -1109,14 +1108,14 @@ int mlx4_map_cmd(struct mlx4_dev *dev, u16 op, struct mlx4_icm *icm, u64 virt)
switch (op) {
case MLX4_CMD_MAP_FA:
- mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW.\n", tc, ts);
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB for FW\n", tc, ts);
break;
case MLX4_CMD_MAP_ICM_AUX:
- mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux.\n", tc, ts);
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB for ICM aux\n", tc, ts);
break;
case MLX4_CMD_MAP_ICM:
- mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM.\n",
- tc, ts, (unsigned long long) virt - (ts << 10));
+ mlx4_dbg(dev, "Mapped %d chunks/%d KB at %llx for ICM\n",
+ tc, ts, (unsigned long long) virt - (ts << 10));
break;
}
@@ -1202,14 +1201,13 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
MLX4_GET(cmd_if_rev, outbox, QUERY_FW_CMD_IF_REV_OFFSET);
if (cmd_if_rev < MLX4_COMMAND_INTERFACE_MIN_REV ||
cmd_if_rev > MLX4_COMMAND_INTERFACE_MAX_REV) {
- mlx4_err(dev, "Installed FW has unsupported "
- "command interface revision %d.\n",
+ mlx4_err(dev, "Installed FW has unsupported command interface revision %d\n",
cmd_if_rev);
mlx4_err(dev, "(Installed FW version is %d.%d.%03d)\n",
(int) (dev->caps.fw_ver >> 32),
(int) (dev->caps.fw_ver >> 16) & 0xffff,
(int) dev->caps.fw_ver & 0xffff);
- mlx4_err(dev, "This driver version supports only revisions %d to %d.\n",
+ mlx4_err(dev, "This driver version supports only revisions %d to %d\n",
MLX4_COMMAND_INTERFACE_MIN_REV, MLX4_COMMAND_INTERFACE_MAX_REV);
err = -ENODEV;
goto out;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 26169b3..5f42f6d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -104,8 +104,6 @@ module_param(enable_64b_cqe_eqe, bool, 0444);
MODULE_PARM_DESC(enable_64b_cqe_eqe,
"Enable 64 byte CQEs/EQEs when the FW supports this (default: True)");
-#define HCA_GLOBAL_CAP_MASK 0
-
#define PF_CONTEXT_BEHAVIOUR_MASK MLX4_FUNC_CAP_64B_EQE_CQE
static char mlx4_version[] =
@@ -134,8 +132,7 @@ MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
static bool use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
-MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
- "(0/1, default 0)");
+MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports (deprecated)");
int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
@@ -163,8 +160,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
for (i = 0; i < dev->caps.num_ports - 1; i++) {
if (port_type[i] != port_type[i + 1]) {
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
- mlx4_err(dev, "Only same port types supported "
- "on this HCA, aborting.\n");
+ mlx4_err(dev, "Only same port types supported on this HCA, aborting\n");
return -EINVAL;
}
}
@@ -172,8 +168,8 @@ int mlx4_check_port_params(struct mlx4_dev *dev,
for (i = 0; i < dev->caps.num_ports; i++) {
if (!(port_type[i] & dev->caps.supported_type[i+1])) {
- mlx4_err(dev, "Requested port type for port %d is not "
- "supported on this HCA\n", i + 1);
+ mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n",
+ i + 1);
return -EINVAL;
}
}
@@ -195,26 +191,23 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
return err;
}
if (dev_cap->min_page_sz > PAGE_SIZE) {
- mlx4_err(dev, "HCA minimum page size of %d bigger than "
- "kernel PAGE_SIZE of %ld, aborting.\n",
+ mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
dev_cap->min_page_sz, PAGE_SIZE);
return -ENODEV;
}
if (dev_cap->num_ports > MLX4_MAX_PORTS) {
- mlx4_err(dev, "HCA has %d ports, but we only support %d, "
- "aborting.\n",
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
dev_cap->num_ports, MLX4_MAX_PORTS);
return -ENODEV;
}
if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
- mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
- "PCI resource 2 size of 0x%llx, aborting.\n",
+ mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
dev_cap->uar_size,
(unsigned long long) pci_resource_len(dev->pdev, 2));
return -ENODEV;
@@ -296,7 +289,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.log_num_macs = log_num_mac;
dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
- dev->caps.log_num_prios = use_prio ? 3 : 0;
for (i = 1; i <= dev->caps.num_ports; ++i) {
dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
@@ -347,14 +339,12 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
dev->caps.log_num_macs = dev_cap->log_max_macs[i];
- mlx4_warn(dev, "Requested number of MACs is too much "
- "for port %d, reducing to %d.\n",
+ mlx4_warn(dev, "Requested number of MACs is too much for port %d, reducing to %d\n",
i, 1 << dev->caps.log_num_macs);
}
if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
- mlx4_warn(dev, "Requested number of VLANs is too much "
- "for port %d, reducing to %d.\n",
+ mlx4_warn(dev, "Requested number of VLANs is too much for port %d, reducing to %d\n",
i, 1 << dev->caps.log_num_vlans);
}
}
@@ -366,7 +356,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
(1 << dev->caps.log_num_macs) *
(1 << dev->caps.log_num_vlans) *
- (1 << dev->caps.log_num_prios) *
dev->caps.num_ports;
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;
@@ -584,13 +573,14 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
memset(&hca_param, 0, sizeof(hca_param));
err = mlx4_QUERY_HCA(dev, &hca_param);
if (err) {
- mlx4_err(dev, "QUERY_HCA command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_HCA command failed, aborting\n");
return err;
}
- /*fail if the hca has an unknown capability */
- if ((hca_param.global_caps | HCA_GLOBAL_CAP_MASK) !=
- HCA_GLOBAL_CAP_MASK) {
+ /* fail if the hca has an unknown global capability
+ * at this time global_caps should be always zeroed
+ */
+ if (hca_param.global_caps) {
mlx4_err(dev, "Unknown hca global capabilities\n");
return -ENOSYS;
}
@@ -603,19 +593,18 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
return err;
}
err = mlx4_QUERY_FW(dev);
if (err)
- mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
+ mlx4_err(dev, "QUERY_FW command failed: could not get FW version\n");
page_size = ~dev->caps.page_size_cap + 1;
mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
if (page_size > PAGE_SIZE) {
- mlx4_err(dev, "HCA minimum page size of %d bigger than "
- "kernel PAGE_SIZE of %ld, aborting.\n",
+ mlx4_err(dev, "HCA minimum page size of %d bigger than kernel PAGE_SIZE of %ld, aborting\n",
page_size, PAGE_SIZE);
return -ENODEV;
}
@@ -633,8 +622,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
memset(&func_cap, 0, sizeof(func_cap));
err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
if (err) {
- mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d).\n",
- err);
+ mlx4_err(dev, "QUERY_FUNC_CAP general command failed, aborting (%d)\n",
+ err);
return err;
}
@@ -661,8 +650,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
dev->caps.num_amgms = 0;
if (dev->caps.num_ports > MLX4_MAX_PORTS) {
- mlx4_err(dev, "HCA has %d ports, but we only support %d, "
- "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
+ mlx4_err(dev, "HCA has %d ports, but we only support %d, aborting\n",
+ dev->caps.num_ports, MLX4_MAX_PORTS);
return -ENODEV;
}
@@ -682,8 +671,8 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
for (i = 1; i <= dev->caps.num_ports; ++i) {
err = mlx4_QUERY_FUNC_CAP(dev, (u32) i, &func_cap);
if (err) {
- mlx4_err(dev, "QUERY_FUNC_CAP port command failed for"
- " port %d, aborting (%d).\n", i, err);
+ mlx4_err(dev, "QUERY_FUNC_CAP port command failed for port %d, aborting (%d)\n",
+ i, err);
goto err_mem;
}
dev->caps.qp0_qkey[i - 1] = func_cap.qp0_qkey;
@@ -702,8 +691,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
if (dev->caps.uar_page_size * (dev->caps.num_uars -
dev->caps.reserved_uars) >
pci_resource_len(dev->pdev, 2)) {
- mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than "
- "PCI resource 2 size of 0x%llx, aborting.\n",
+ mlx4_err(dev, "HCA reported UAR region size of 0x%x bigger than PCI resource 2 size of 0x%llx, aborting\n",
dev->caps.uar_page_size * dev->caps.num_uars,
(unsigned long long) pci_resource_len(dev->pdev, 2));
goto err_mem;
@@ -725,7 +713,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
}
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
- mlx4_warn(dev, "Timestamping is not supported in slave mode.\n");
+ mlx4_warn(dev, "Timestamping is not supported in slave mode\n");
slave_adjust_steering_mode(dev, &dev_cap, &hca_param);
@@ -791,8 +779,8 @@ int mlx4_change_port_types(struct mlx4_dev *dev,
dev->caps.port_type[port] = port_types[port - 1];
err = mlx4_SET_PORT(dev, port, -1);
if (err) {
- mlx4_err(dev, "Failed to set port %d, "
- "aborting\n", port);
+ mlx4_err(dev, "Failed to set port %d, aborting\n",
+ port);
goto out;
}
}
@@ -875,9 +863,7 @@ static ssize_t set_port_type(struct device *dev,
}
}
if (err) {
- mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
- "Set only 'eth' or 'ib' for both ports "
- "(should be the same)\n");
+ mlx4_err(mdev, "Auto sensing is not supported on this HCA. Set only 'eth' or 'ib' for both ports (should be the same)\n");
goto out;
}
@@ -982,8 +968,8 @@ static ssize_t set_port_ib_mtu(struct device *dev,
mlx4_CLOSE_PORT(mdev, port);
err = mlx4_SET_PORT(mdev, port, -1);
if (err) {
- mlx4_err(mdev, "Failed to set port %d, "
- "aborting\n", port);
+ mlx4_err(mdev, "Failed to set port %d, aborting\n",
+ port);
goto err_set_port;
}
}
@@ -1002,19 +988,19 @@ static int mlx4_load_fw(struct mlx4_dev *dev)
priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!priv->fw.fw_icm) {
- mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
+ mlx4_err(dev, "Couldn't allocate FW area, aborting\n");
return -ENOMEM;
}
err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
if (err) {
- mlx4_err(dev, "MAP_FA command failed, aborting.\n");
+ mlx4_err(dev, "MAP_FA command failed, aborting\n");
goto err_free;
}
err = mlx4_RUN_FW(dev);
if (err) {
- mlx4_err(dev, "RUN_FW command failed, aborting.\n");
+ mlx4_err(dev, "RUN_FW command failed, aborting\n");
goto err_unmap_fa;
}
@@ -1098,30 +1084,30 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
if (err) {
- mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
+ mlx4_err(dev, "SET_ICM_SIZE command failed, aborting\n");
return err;
}
- mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
+ mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory\n",
(unsigned long long) icm_size >> 10,
(unsigned long long) aux_pages << 2);
priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
GFP_HIGHUSER | __GFP_NOWARN, 0);
if (!priv->fw.aux_icm) {
- mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
+ mlx4_err(dev, "Couldn't allocate aux memory, aborting\n");
return -ENOMEM;
}
err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
if (err) {
- mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
+ mlx4_err(dev, "MAP_ICM_AUX command failed, aborting\n");
goto err_free_aux;
}
err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
if (err) {
- mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map cMPT context memory, aborting\n");
goto err_unmap_aux;
}
@@ -1132,7 +1118,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
init_hca->eqc_base, dev_cap->eqc_entry_sz,
num_eqs, num_eqs, 0, 0);
if (err) {
- mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map EQ context memory, aborting\n");
goto err_unmap_cmpt;
}
@@ -1153,7 +1139,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_mtts,
dev->caps.reserved_mtts, 1, 0);
if (err) {
- mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map MTT context memory, aborting\n");
goto err_unmap_eq;
}
@@ -1163,7 +1149,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_mpts,
dev->caps.reserved_mrws, 1, 1);
if (err) {
- mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map dMPT context memory, aborting\n");
goto err_unmap_mtt;
}
@@ -1174,7 +1160,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map QP context memory, aborting\n");
goto err_unmap_dmpt;
}
@@ -1185,7 +1171,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map AUXC context memory, aborting\n");
goto err_unmap_qp;
}
@@ -1196,7 +1182,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map ALTC context memory, aborting\n");
goto err_unmap_auxc;
}
@@ -1217,7 +1203,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_cqs,
dev->caps.reserved_cqs, 0, 0);
if (err) {
- mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map CQ context memory, aborting\n");
goto err_unmap_rdmarc;
}
@@ -1227,7 +1213,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_srqs,
dev->caps.reserved_srqs, 0, 0);
if (err) {
- mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map SRQ context memory, aborting\n");
goto err_unmap_cq;
}
@@ -1245,7 +1231,7 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
dev->caps.num_mgms + dev->caps.num_amgms,
0, 0);
if (err) {
- mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
+ mlx4_err(dev, "Failed to map MCG context memory, aborting\n");
goto err_unmap_srq;
}
@@ -1322,7 +1308,7 @@ static void mlx4_slave_exit(struct mlx4_dev *dev)
mutex_lock(&priv->cmd.slave_cmd_mutex);
if (mlx4_comm_cmd(dev, MLX4_COMM_CMD_RESET, 0, MLX4_COMM_TIME))
- mlx4_warn(dev, "Failed to close slave function.\n");
+ mlx4_warn(dev, "Failed to close slave function\n");
mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
@@ -1420,7 +1406,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
u32 cmd_channel_ver;
if (atomic_read(&pf_loading)) {
- mlx4_warn(dev, "PF is not ready. Deferring probe\n");
+ mlx4_warn(dev, "PF is not ready - Deferring probe\n");
return -EPROBE_DEFER;
}
@@ -1433,8 +1419,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
* NUM_OF_RESET_RETRIES times before leaving.*/
if (ret_from_reset) {
if (MLX4_DELAY_RESET_SLAVE == ret_from_reset) {
- mlx4_warn(dev, "slave is currently in the "
- "middle of FLR. Deferring probe.\n");
+ mlx4_warn(dev, "slave is currently in the middle of FLR - Deferring probe\n");
mutex_unlock(&priv->cmd.slave_cmd_mutex);
return -EPROBE_DEFER;
} else
@@ -1448,8 +1433,7 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
if (MLX4_COMM_GET_IF_REV(cmd_channel_ver) !=
MLX4_COMM_GET_IF_REV(slave_read)) {
- mlx4_err(dev, "slave driver version is not supported"
- " by the master\n");
+ mlx4_err(dev, "slave driver version is not supported by the master\n");
goto err;
}
@@ -1527,8 +1511,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER ||
dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)
- mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags "
- "set to use B0 steering. Falling back to A0 steering mode.\n");
+ mlx4_warn(dev, "Must have both UC_STEER and MC_STEER flags set to use B0 steering - falling back to A0 steering mode\n");
}
dev->oper_log_mgm_entry_size =
mlx4_log_num_mgm_entry_size > 0 ?
@@ -1536,8 +1519,7 @@ static void choose_steering_mode(struct mlx4_dev *dev,
MLX4_DEFAULT_MGM_LOG_ENTRY_SIZE;
dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev);
}
- mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, "
- "modparam log_num_mgm_entry_size = %d\n",
+ mlx4_dbg(dev, "Steering mode is: %s, oper_log_mgm_entry_size = %d, modparam log_num_mgm_entry_size = %d\n",
mlx4_steering_mode_str(dev->caps.steering_mode),
dev->oper_log_mgm_entry_size,
mlx4_log_num_mgm_entry_size);
@@ -1571,15 +1553,15 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_QUERY_FW(dev);
if (err) {
if (err == -EACCES)
- mlx4_info(dev, "non-primary physical function, skipping.\n");
+ mlx4_info(dev, "non-primary physical function, skipping\n");
else
- mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_FW command failed, aborting\n");
return err;
}
err = mlx4_load_fw(dev);
if (err) {
- mlx4_err(dev, "Failed to start FW, aborting.\n");
+ mlx4_err(dev, "Failed to start FW, aborting\n");
return err;
}
@@ -1591,7 +1573,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
- mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
goto err_stop_fw;
}
@@ -1632,7 +1614,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_INIT_HCA(dev, &init_hca);
if (err) {
- mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
+ mlx4_err(dev, "INIT_HCA command failed, aborting\n");
goto err_free_icm;
}
/*
@@ -1643,7 +1625,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
memset(&init_hca, 0, sizeof(init_hca));
err = mlx4_QUERY_HCA(dev, &init_hca);
if (err) {
- mlx4_err(dev, "QUERY_HCA command failed, disable timestamp.\n");
+ mlx4_err(dev, "QUERY_HCA command failed, disable timestamp\n");
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
} else {
dev->caps.hca_core_clock =
@@ -1656,14 +1638,14 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
if (!dev->caps.hca_core_clock) {
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
mlx4_err(dev,
- "HCA frequency is 0. Timestamping is not supported.");
+ "HCA frequency is 0 - timestamping is not supported\n");
} else if (map_internal_clock(dev)) {
/*
* Map internal clock,
* in case of failure disable timestamping
*/
dev->caps.flags2 &= ~MLX4_DEV_CAP_FLAG2_TS;
- mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported.\n");
+ mlx4_err(dev, "Failed to map internal clock. Timestamping is not supported\n");
}
}
} else {
@@ -1690,7 +1672,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_QUERY_ADAPTER(dev, &adapter);
if (err) {
- mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
+ mlx4_err(dev, "QUERY_ADAPTER command failed, aborting\n");
goto unmap_bf;
}
@@ -1808,79 +1790,69 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_init_uar_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "user access region table, aborting.\n");
- return err;
+ mlx4_err(dev, "Failed to initialize user access region table, aborting\n");
+ return err;
}
err = mlx4_uar_alloc(dev, &priv->driver_uar);
if (err) {
- mlx4_err(dev, "Failed to allocate driver access region, "
- "aborting.\n");
+ mlx4_err(dev, "Failed to allocate driver access region, aborting\n");
goto err_uar_table_free;
}
priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
if (!priv->kar) {
- mlx4_err(dev, "Couldn't map kernel access region, "
- "aborting.\n");
+ mlx4_err(dev, "Couldn't map kernel access region, aborting\n");
err = -ENOMEM;
goto err_uar_free;
}
err = mlx4_init_pd_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "protection domain table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize protection domain table, aborting\n");
goto err_kar_unmap;
}
err = mlx4_init_xrcd_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "reliable connection domain table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize reliable connection domain table, aborting\n");
goto err_pd_table_free;
}
err = mlx4_init_mr_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "memory region table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize memory region table, aborting\n");
goto err_xrcd_table_free;
}
if (!mlx4_is_slave(dev)) {
err = mlx4_init_mcg_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize multicast group table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize multicast group table, aborting\n");
goto err_mr_table_free;
}
}
err = mlx4_init_eq_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "event queue table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize event queue table, aborting\n");
goto err_mcg_table_free;
}
err = mlx4_cmd_use_events(dev);
if (err) {
- mlx4_err(dev, "Failed to switch to event-driven "
- "firmware commands, aborting.\n");
+ mlx4_err(dev, "Failed to switch to event-driven firmware commands, aborting\n");
goto err_eq_table_free;
}
err = mlx4_NOP(dev);
if (err) {
if (dev->flags & MLX4_FLAG_MSI_X) {
- mlx4_warn(dev, "NOP command failed to generate MSI-X "
- "interrupt IRQ %d).\n",
+ mlx4_warn(dev, "NOP command failed to generate MSI-X interrupt IRQ %d)\n",
priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
- mlx4_warn(dev, "Trying again without MSI-X.\n");
+ mlx4_warn(dev, "Trying again without MSI-X\n");
} else {
- mlx4_err(dev, "NOP command failed to generate interrupt "
- "(IRQ %d), aborting.\n",
+ mlx4_err(dev, "NOP command failed to generate interrupt (IRQ %d), aborting\n",
priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
}
@@ -1892,28 +1864,25 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_init_cq_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "completion queue table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize completion queue table, aborting\n");
goto err_cmd_poll;
}
err = mlx4_init_srq_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "shared receive queue table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize shared receive queue table, aborting\n");
goto err_cq_table_free;
}
err = mlx4_init_qp_table(dev);
if (err) {
- mlx4_err(dev, "Failed to initialize "
- "queue pair table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize queue pair table, aborting\n");
goto err_srq_table_free;
}
err = mlx4_init_counters_table(dev);
if (err && err != -ENOENT) {
- mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
+ mlx4_err(dev, "Failed to initialize counters table, aborting\n");
goto err_qp_table_free;
}
@@ -1923,9 +1892,8 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
err = mlx4_get_port_ib_caps(dev, port,
&ib_port_default_caps);
if (err)
- mlx4_warn(dev, "failed to get port %d default "
- "ib capabilities (%d). Continuing "
- "with caps = 0\n", port, err);
+ mlx4_warn(dev, "failed to get port %d default ib capabilities (%d). Continuing with caps = 0\n",
+ port, err);
dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
/* initialize per-slave default ib port capabilities */
@@ -1935,7 +1903,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
if (i == mlx4_master_func_num(dev))
continue;
priv->mfunc.master.slave_state[i].ib_cap_mask[port] =
- ib_port_default_caps;
+ ib_port_default_caps;
}
}
@@ -1948,7 +1916,7 @@ static int mlx4_setup_hca(struct mlx4_dev *dev)
dev->caps.pkey_table_len[port] : -1);
if (err) {
mlx4_err(dev, "Failed to set port %d, aborting\n",
- port);
+ port);
goto err_counters_table_free;
}
}
@@ -2024,7 +1992,7 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
kfree(entries);
goto no_msi;
} else if (nreq < MSIX_LEGACY_SZ +
- dev->caps.num_ports * MIN_MSIX_P_PORT) {
+ dev->caps.num_ports * MIN_MSIX_P_PORT) {
/*Working in legacy mode , all EQ's shared*/
dev->caps.comp_pool = 0;
dev->caps.num_comp_vectors = nreq - 1;
@@ -2225,8 +2193,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
return err;
}
@@ -2273,14 +2240,13 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
*/
if (!(pci_dev_data & MLX4_PCI_DEV_IS_VF) &&
!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "Missing DCS, aborting."
- "(driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
+ dev_err(&pdev->dev, "Missing DCS, aborting (driver_data: 0x%x, pci_resource_flags(pdev, 0):0x%lx)\n",
pci_dev_data, pci_resource_flags(pdev, 0));
err = -ENODEV;
goto err_disable_pdev;
}
if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "Missing UAR, aborting.\n");
+ dev_err(&pdev->dev, "Missing UAR, aborting\n");
err = -ENODEV;
goto err_disable_pdev;
}
@@ -2295,21 +2261,19 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
goto err_release_regions;
}
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
- "consistent PCI DMA mask.\n");
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
- "aborting.\n");
+ dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n");
goto err_release_regions;
}
}
@@ -2340,7 +2304,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
if (total_vfs) {
unsigned vfs_offset = 0;
for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]) &&
- vfs_offset + nvfs[i] < extended_func_num(pdev);
+ vfs_offset + nvfs[i] < extended_func_num(pdev);
vfs_offset += nvfs[i], i++)
;
if (i == sizeof(nvfs)/sizeof(nvfs[0])) {
@@ -2366,8 +2330,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
if (err < 0)
goto err_free_dev;
else {
- mlx4_warn(dev, "Multiple PFs not yet supported."
- " Skipping PF.\n");
+ mlx4_warn(dev, "Multiple PFs not yet supported - Skipping PF\n");
err = -EINVAL;
goto err_free_dev;
}
@@ -2377,8 +2340,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n",
total_vfs);
dev->dev_vfs = kzalloc(
- total_vfs * sizeof(*dev->dev_vfs),
- GFP_KERNEL);
+ total_vfs * sizeof(*dev->dev_vfs),
+ GFP_KERNEL);
if (NULL == dev->dev_vfs) {
mlx4_err(dev, "Failed to allocate memory for VFs\n");
err = 0;
@@ -2386,14 +2349,14 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
atomic_inc(&pf_loading);
err = pci_enable_sriov(pdev, total_vfs);
if (err) {
- mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
+ mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d)\n",
err);
atomic_dec(&pf_loading);
err = 0;
} else {
mlx4_warn(dev, "Running in master mode\n");
dev->flags |= MLX4_FLAG_SRIOV |
- MLX4_FLAG_MASTER;
+ MLX4_FLAG_MASTER;
dev->num_vfs = total_vfs;
sriov_initialized = 1;
}
@@ -2410,7 +2373,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
*/
err = mlx4_reset(dev);
if (err) {
- mlx4_err(dev, "Failed to reset HCA, aborting.\n");
+ mlx4_err(dev, "Failed to reset HCA, aborting\n");
goto err_rel_own;
}
}
@@ -2418,7 +2381,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
slave_start:
err = mlx4_cmd_init(dev);
if (err) {
- mlx4_err(dev, "Failed to init command interface, aborting.\n");
+ mlx4_err(dev, "Failed to init command interface, aborting\n");
goto err_sriov;
}
@@ -2432,8 +2395,7 @@ slave_start:
dev->num_slaves = 0;
err = mlx4_multi_func_init(dev);
if (err) {
- mlx4_err(dev, "Failed to init slave mfunc"
- " interface, aborting.\n");
+ mlx4_err(dev, "Failed to init slave mfunc interface, aborting\n");
goto err_cmd;
}
}
@@ -2465,8 +2427,7 @@ slave_start:
unsigned sum = 0;
err = mlx4_multi_func_init(dev);
if (err) {
- mlx4_err(dev, "Failed to init master mfunc"
- "interface, aborting.\n");
+ mlx4_err(dev, "Failed to init master mfunc interface, aborting\n");
goto err_close;
}
if (sriov_initialized) {
@@ -2477,10 +2438,7 @@ slave_start:
if (ib_ports &&
(num_vfs_argc > 1 || probe_vfs_argc > 1)) {
mlx4_err(dev,
- "Invalid syntax of num_vfs/probe_vfs "
- "with IB port. Single port VFs syntax"
- " is only supported when all ports "
- "are configured as ethernet\n");
+ "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
goto err_close;
}
for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
@@ -2506,8 +2464,7 @@ slave_start:
if ((mlx4_is_mfunc(dev)) &&
!(dev->flags & MLX4_FLAG_MSI_X)) {
err = -ENOSYS;
- mlx4_err(dev, "INTx is not supported in multi-function mode."
- " aborting.\n");
+ mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
goto err_free_eq;
}
@@ -2660,7 +2617,7 @@ static void __mlx4_remove_one(struct pci_dev *pdev)
/* in SRIOV it is not allowed to unload the pf's
* driver while there are alive vf's */
if (mlx4_is_master(dev) && mlx4_how_many_lives_vf(dev))
- printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+ pr_warn("Removing PF when there are assigned VF's !!!\n");
mlx4_stop_sense(dev);
mlx4_unregister_device(dev);
@@ -2824,7 +2781,7 @@ static struct pci_driver mlx4_driver = {
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
- .shutdown = mlx4_remove_one,
+ .shutdown = __mlx4_remove_one,
.remove = mlx4_remove_one,
.err_handler = &mlx4_err_handler,
};
@@ -2832,33 +2789,36 @@ static struct pci_driver mlx4_driver = {
static int __init mlx4_verify_params(void)
{
if ((log_num_mac < 0) || (log_num_mac > 7)) {
- pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
+ pr_warn("mlx4_core: bad num_mac: %d\n", log_num_mac);
return -1;
}
if (log_num_vlan != 0)
- pr_warning("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
- MLX4_LOG_NUM_VLANS);
+ pr_warn("mlx4_core: log_num_vlan - obsolete module param, using %d\n",
+ MLX4_LOG_NUM_VLANS);
+
+ if (use_prio != 0)
+ pr_warn("mlx4_core: use_prio - obsolete module param, ignored\n");
if ((log_mtts_per_seg < 1) || (log_mtts_per_seg > 7)) {
- pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
+ pr_warn("mlx4_core: bad log_mtts_per_seg: %d\n",
+ log_mtts_per_seg);
return -1;
}
/* Check if module param for ports type has legal combination */
if (port_type_array[0] == false && port_type_array[1] == true) {
- printk(KERN_WARNING "Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
+ pr_warn("Module parameter configuration ETH/IB is not supported. Switching to default configuration IB/IB\n");
port_type_array[0] = true;
}
if (mlx4_log_num_mgm_entry_size != -1 &&
(mlx4_log_num_mgm_entry_size < MLX4_MIN_MGM_LOG_ENTRY_SIZE ||
mlx4_log_num_mgm_entry_size > MLX4_MAX_MGM_LOG_ENTRY_SIZE)) {
- pr_warning("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not "
- "in legal range (-1 or %d..%d)\n",
- mlx4_log_num_mgm_entry_size,
- MLX4_MIN_MGM_LOG_ENTRY_SIZE,
- MLX4_MAX_MGM_LOG_ENTRY_SIZE);
+ pr_warn("mlx4_core: mlx4_log_num_mgm_entry_size (%d) not in legal range (-1 or %d..%d)\n",
+ mlx4_log_num_mgm_entry_size,
+ MLX4_MIN_MGM_LOG_ENTRY_SIZE,
+ MLX4_MAX_MGM_LOG_ENTRY_SIZE);
return -1;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index 80ccb4e..4c36def 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -638,7 +638,7 @@ static int find_entry(struct mlx4_dev *dev, u8 port,
if (!(be32_to_cpu(mgm->members_count) & 0xffffff)) {
if (*index != hash) {
- mlx4_err(dev, "Found zero MGID in AMGM.\n");
+ mlx4_err(dev, "Found zero MGID in AMGM\n");
err = -EINVAL;
}
return err;
@@ -874,7 +874,7 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
mlx4_err(dev, "%s", buf);
if (len >= BUF_SIZE)
- mlx4_err(dev, "Network rule error message was truncated, print buffer is too small.\n");
+ mlx4_err(dev, "Network rule error message was truncated, print buffer is too small\n");
}
int mlx4_flow_attach(struct mlx4_dev *dev,
@@ -897,7 +897,7 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
ret = parse_trans_rule(dev, cur, mailbox->buf + size);
if (ret < 0) {
mlx4_free_cmd_mailbox(dev, mailbox);
- return -EINVAL;
+ return ret;
}
size += ret;
}
@@ -905,10 +905,10 @@ int mlx4_flow_attach(struct mlx4_dev *dev,
ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id);
if (ret == -ENOMEM)
mlx4_err_rule(dev,
- "mcg table is full. Fail to register network rule.\n",
+ "mcg table is full. Fail to register network rule\n",
rule);
else if (ret)
- mlx4_err_rule(dev, "Fail to register network rule.\n", rule);
+ mlx4_err_rule(dev, "Fail to register network rule\n", rule);
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -994,7 +994,7 @@ int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
if (members_count == dev->caps.num_qp_per_mgm) {
- mlx4_err(dev, "MGM at index %x is full.\n", index);
+ mlx4_err(dev, "MGM at index %x is full\n", index);
err = -ENOMEM;
goto out;
}
@@ -1042,7 +1042,7 @@ out:
}
if (err && link && index != -1) {
if (index < dev->caps.num_mgms)
- mlx4_warn(dev, "Got AMGM index %d < %d",
+ mlx4_warn(dev, "Got AMGM index %d < %d\n",
index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1133,7 +1133,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
if (amgm_index) {
if (amgm_index < dev->caps.num_mgms)
- mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d",
+ mlx4_warn(dev, "MGM entry %d had AMGM index %d < %d\n",
index, amgm_index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
@@ -1153,7 +1153,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
goto out;
if (index < dev->caps.num_mgms)
- mlx4_warn(dev, "entry %d had next AMGM index %d < %d",
+ mlx4_warn(dev, "entry %d had next AMGM index %d < %d\n",
prev, index, dev->caps.num_mgms);
else
mlx4_bitmap_free(&priv->mcg_table.bitmap,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 7a0665b..1d8af73 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -221,18 +221,19 @@ extern int mlx4_debug_level;
#define mlx4_debug_level (0)
#endif /* CONFIG_MLX4_DEBUG */
-#define mlx4_dbg(mdev, format, arg...) \
+#define mlx4_dbg(mdev, format, ...) \
do { \
if (mlx4_debug_level) \
- dev_printk(KERN_DEBUG, &mdev->pdev->dev, format, ##arg); \
+ dev_printk(KERN_DEBUG, &(mdev)->pdev->dev, format, \
+ ##__VA_ARGS__); \
} while (0)
-#define mlx4_err(mdev, format, arg...) \
- dev_err(&mdev->pdev->dev, format, ##arg)
-#define mlx4_info(mdev, format, arg...) \
- dev_info(&mdev->pdev->dev, format, ##arg)
-#define mlx4_warn(mdev, format, arg...) \
- dev_warn(&mdev->pdev->dev, format, ##arg)
+#define mlx4_err(mdev, format, ...) \
+ dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...) \
+ dev_info(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...) \
+ dev_warn(&(mdev)->pdev->dev, format, ##__VA_ARGS__)
extern int mlx4_log_num_mgm_entry_size;
extern int log_mtts_per_seg;
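
For reference, the macro hunk above swaps the GNU-specific named variadic parameter (the arg... / ##arg pair) for standard C99 __VA_ARGS__; the ## before __VA_ARGS__ drops the trailing comma when no extra arguments are passed, and the new parentheses around (mdev) keep the expansion safe when the argument is an expression rather than a plain identifier. A minimal sketch of the rewritten pattern, with hypothetical sample messages:

	#define mlx4_err(mdev, format, ...) \
		dev_err(&(mdev)->pdev->dev, format, ##__VA_ARGS__)

	mlx4_err(mdev, "QUERY_FW command failed, aborting\n");      /* no varargs */
	mlx4_err(mdev, "HCA has %d ports\n", dev->caps.num_ports);  /* one vararg */

Both calls compile because ##__VA_ARGS__ removes the comma left behind by an empty argument list.
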
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 04d9b6fe..0e15295 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -313,6 +313,7 @@ struct mlx4_en_rx_ring {
unsigned long csum_ok;
unsigned long csum_none;
int hwtstamp_rx_filter;
+ cpumask_var_t affinity_mask;
};
struct mlx4_en_cq {
@@ -830,26 +831,26 @@ __printf(3, 4)
int en_print(const char *level, const struct mlx4_en_priv *priv,
const char *format, ...);
-#define en_dbg(mlevel, priv, format, arg...) \
-do { \
- if (NETIF_MSG_##mlevel & priv->msg_enable) \
- en_print(KERN_DEBUG, priv, format, ##arg); \
+#define en_dbg(mlevel, priv, format, ...) \
+do { \
+ if (NETIF_MSG_##mlevel & (priv)->msg_enable) \
+ en_print(KERN_DEBUG, priv, format, ##__VA_ARGS__); \
} while (0)
-#define en_warn(priv, format, arg...) \
- en_print(KERN_WARNING, priv, format, ##arg)
-#define en_err(priv, format, arg...) \
- en_print(KERN_ERR, priv, format, ##arg)
-#define en_info(priv, format, arg...) \
- en_print(KERN_INFO, priv, format, ## arg)
-
-#define mlx4_err(mdev, format, arg...) \
- pr_err("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_info(mdev, format, arg...) \
- pr_info("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
-#define mlx4_warn(mdev, format, arg...) \
- pr_warning("%s %s: " format, DRV_NAME, \
- dev_name(&mdev->pdev->dev), ##arg)
+#define en_warn(priv, format, ...) \
+ en_print(KERN_WARNING, priv, format, ##__VA_ARGS__)
+#define en_err(priv, format, ...) \
+ en_print(KERN_ERR, priv, format, ##__VA_ARGS__)
+#define en_info(priv, format, ...) \
+ en_print(KERN_INFO, priv, format, ##__VA_ARGS__)
+
+#define mlx4_err(mdev, format, ...) \
+ pr_err(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_info(mdev, format, ...) \
+ pr_info(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
+#define mlx4_warn(mdev, format, ...) \
+ pr_warn(DRV_NAME " %s: " format, \
+ dev_name(&(mdev)->pdev->dev), ##__VA_ARGS__)
#endif
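
A related note on the pr_warn conversion just above: DRV_NAME is a string literal, so it can be pasted into the format string by compile-time concatenation instead of being passed through an extra "%s" at run time. A small sketch under that assumption, with a hypothetical message:

	#define DRV_NAME "mlx4_en"

	/* expands to pr_warn("mlx4_en %s: link is down\n", ...) */
	pr_warn(DRV_NAME " %s: link is down\n",
		dev_name(&(mdev)->pdev->dev));

One format argument fewer is evaluated per call, and the prefix is checked by the compiler as part of the format string.
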
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 4c71daf..2839abb 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -250,8 +250,8 @@ static void mlx4_free_mtt_range(struct mlx4_dev *dev, u32 offset, int order)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_WRAPPED);
if (err)
- mlx4_warn(dev, "Failed to free mtt range at:"
- "%d order:%d\n", offset, order);
+ mlx4_warn(dev, "Failed to free mtt range at:%d order:%d\n",
+ offset, order);
return;
}
__mlx4_free_mtt_range(dev, offset, order);
@@ -436,8 +436,8 @@ static int mlx4_mr_free_reserved(struct mlx4_dev *dev, struct mlx4_mr *mr)
key_to_hw_index(mr->key) &
(dev->caps.num_mpts - 1));
if (err) {
- mlx4_warn(dev, "HW2SW_MPT failed (%d),", err);
- mlx4_warn(dev, "MR has MWs bound to it.\n");
+ mlx4_warn(dev, "HW2SW_MPT failed (%d), MR has MWs bound to it\n",
+ err);
return err;
}
@@ -774,7 +774,7 @@ int mlx4_init_mr_table(struct mlx4_dev *dev)
mlx4_alloc_mtt_range(dev,
fls(dev->caps.reserved_mtts - 1));
if (priv->reserved_mtts < 0) {
- mlx4_warn(dev, "MTT table of order %u is too small.\n",
+ mlx4_warn(dev, "MTT table of order %u is too small\n",
mr_table->mtt_buddy.max_order);
err = -ENOMEM;
goto err_reserve_mtts;
@@ -955,8 +955,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
mailbox = mlx4_alloc_cmd_mailbox(dev);
if (IS_ERR(mailbox)) {
err = PTR_ERR(mailbox);
- printk(KERN_WARNING "mlx4_ib: mlx4_alloc_cmd_mailbox"
- " failed (%d)\n", err);
+ pr_warn("mlx4_ib: mlx4_alloc_cmd_mailbox failed (%d)\n", err);
return;
}
@@ -965,8 +964,7 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr,
(dev->caps.num_mpts - 1));
mlx4_free_cmd_mailbox(dev, mailbox);
if (err) {
- printk(KERN_WARNING "mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n",
- err);
+ pr_warn("mlx4_ib: mlx4_HW2SW_MPT failed (%d)\n", err);
return;
}
fmr->mr.enabled = MLX4_MPT_EN_SW;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 5ec6f20..7ab9717 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -254,8 +254,8 @@ void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
if (validate_index(dev, table, index))
goto out;
if (--table->refs[index]) {
- mlx4_dbg(dev, "Have more references for index %d,"
- "no need to modify mac table\n", index);
+ mlx4_dbg(dev, "Have more references for index %d, no need to modify mac table\n",
+ index);
goto out;
}
@@ -453,9 +453,8 @@ void __mlx4_unregister_vlan(struct mlx4_dev *dev, u8 port, u16 vlan)
}
if (--table->refs[index]) {
- mlx4_dbg(dev, "Have %d more references for index %d,"
- "no need to modify vlan table\n", table->refs[index],
- index);
+ mlx4_dbg(dev, "Have %d more references for index %d, no need to modify vlan table\n",
+ table->refs[index], index);
goto out;
}
table->entries[index] = 0;
@@ -796,8 +795,7 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
if (!memcmp(gid_entry_mbox->raw, gid_entry_tbl->raw,
sizeof(gid_entry_tbl->raw))) {
/* found duplicate */
- mlx4_warn(dev, "requested gid entry for slave:%d "
- "is a duplicate of gid at index %d\n",
+ mlx4_warn(dev, "requested gid entry for slave:%d is a duplicate of gid at index %d\n",
slave, i);
mutex_unlock(&(priv->port[port].gid_table.mutex));
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 8e0c3cc..14089d9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -164,18 +164,17 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
}
if (total_size > dev_cap->max_icm_sz) {
- mlx4_err(dev, "Profile requires 0x%llx bytes; "
- "won't fit in 0x%llx bytes of context memory.\n",
- (unsigned long long) total_size,
- (unsigned long long) dev_cap->max_icm_sz);
+ mlx4_err(dev, "Profile requires 0x%llx bytes; won't fit in 0x%llx bytes of context memory\n",
+ (unsigned long long) total_size,
+ (unsigned long long) dev_cap->max_icm_sz);
kfree(profile);
return -ENOMEM;
}
if (profile[i].size)
- mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, "
- "size 0x%10llx\n",
- i, res_name[profile[i].type], profile[i].log_num,
+ mlx4_dbg(dev, " profile[%2d] (%6s): 2^%02d entries @ 0x%10llx, size 0x%10llx\n",
+ i, res_name[profile[i].type],
+ profile[i].log_num,
(unsigned long long) profile[i].start,
(unsigned long long) profile[i].size);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c
index 40af619..0dc31d8 100644
--- a/drivers/net/ethernet/mellanox/mlx4/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx4/qp.c
@@ -264,8 +264,8 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt)
MLX4_CMD_FREE_RES,
MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
if (err) {
- mlx4_warn(dev, "Failed to release qp range"
- " base:%d cnt:%d\n", base_qpn, cnt);
+ mlx4_warn(dev, "Failed to release qp range base:%d cnt:%d\n",
+ base_qpn, cnt);
}
} else
__mlx4_qp_release_range(dev, base_qpn, cnt);
@@ -612,8 +612,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],
context, 0, 0, qp);
if (err) {
- mlx4_err(dev, "Failed to bring QP to state: "
- "%d with error: %d\n",
+ mlx4_err(dev, "Failed to bring QP to state: %d with error: %d\n",
states[i + 1], err);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/reset.c b/drivers/net/ethernet/mellanox/mlx4/reset.c
index dd1b509..ea1c6d0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/reset.c
+++ b/drivers/net/ethernet/mellanox/mlx4/reset.c
@@ -72,8 +72,7 @@ int mlx4_reset(struct mlx4_dev *dev)
hca_header = kmalloc(256, GFP_KERNEL);
if (!hca_header) {
err = -ENOMEM;
- mlx4_err(dev, "Couldn't allocate memory to save HCA "
- "PCI header, aborting.\n");
+ mlx4_err(dev, "Couldn't allocate memory to save HCA PCI header, aborting\n");
goto out;
}
@@ -84,8 +83,7 @@ int mlx4_reset(struct mlx4_dev *dev)
continue;
if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't save HCA "
- "PCI header, aborting.\n");
+ mlx4_err(dev, "Couldn't save HCA PCI header, aborting\n");
goto out;
}
}
@@ -94,7 +92,7 @@ int mlx4_reset(struct mlx4_dev *dev)
MLX4_RESET_SIZE);
if (!reset) {
err = -ENOMEM;
- mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n");
+ mlx4_err(dev, "Couldn't map HCA reset register, aborting\n");
goto out;
}
@@ -133,8 +131,7 @@ int mlx4_reset(struct mlx4_dev *dev)
if (vendor == 0xffff) {
err = -ENODEV;
- mlx4_err(dev, "PCI device did not come back after reset, "
- "aborting.\n");
+ mlx4_err(dev, "PCI device did not come back after reset, aborting\n");
goto out;
}
@@ -144,16 +141,14 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pcie_capability_write_word(dev->pdev, PCI_EXP_DEVCTL,
devctl)) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA PCI Express "
- "Device Control register, aborting.\n");
+ mlx4_err(dev, "Couldn't restore HCA PCI Express Device Control register, aborting\n");
goto out;
}
linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4];
if (pcie_capability_write_word(dev->pdev, PCI_EXP_LNKCTL,
linkctl)) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA PCI Express "
- "Link control register, aborting.\n");
+ mlx4_err(dev, "Couldn't restore HCA PCI Express Link control register, aborting\n");
goto out;
}
}
@@ -164,8 +159,8 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA reg %x, "
- "aborting.\n", i);
+ mlx4_err(dev, "Couldn't restore HCA reg %x, aborting\n",
+ i);
goto out;
}
}
@@ -173,8 +168,7 @@ int mlx4_reset(struct mlx4_dev *dev)
if (pci_write_config_dword(dev->pdev, PCI_COMMAND,
hca_header[PCI_COMMAND / 4])) {
err = -ENODEV;
- mlx4_err(dev, "Couldn't restore HCA COMMAND, "
- "aborting.\n");
+ mlx4_err(dev, "Couldn't restore HCA COMMAND, aborting\n");
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 2ba3b76..0efc136 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -279,7 +279,7 @@ enum qp_transition {
};
/* For Debug uses */
-static const char *ResourceType(enum mlx4_resource rt)
+static const char *resource_str(enum mlx4_resource rt)
{
switch (rt) {
case RES_QP: return "RES_QP";
@@ -307,6 +307,7 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
&priv->mfunc.master.res_tracker.res_alloc[res_type];
int err = -EINVAL;
int allocated, free, reserved, guaranteed, from_free;
+ int from_rsvd;
if (slave > dev->num_vfs)
return -EINVAL;
@@ -321,11 +322,16 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
res_alloc->res_reserved;
guaranteed = res_alloc->guaranteed[slave];
- if (allocated + count > res_alloc->quota[slave])
+ if (allocated + count > res_alloc->quota[slave]) {
+ mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
+ slave, port, resource_str(res_type), count,
+ allocated, res_alloc->quota[slave]);
goto out;
+ }
if (allocated + count <= guaranteed) {
err = 0;
+ from_rsvd = count;
} else {
/* portion may need to be obtained from free area */
if (guaranteed - allocated > 0)
@@ -333,8 +339,14 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
else
from_free = count;
- if (free - from_free > reserved)
+ from_rsvd = count - from_free;
+
+ if (free - from_free >= reserved)
err = 0;
+ else
+ mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
+ slave, port, resource_str(res_type), free,
+ from_free, reserved);
}
if (!err) {
@@ -342,9 +354,11 @@ static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
if (port > 0) {
res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] += count;
res_alloc->res_port_free[port - 1] -= count;
+ res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
} else {
res_alloc->allocated[slave] += count;
res_alloc->res_free -= count;
+ res_alloc->res_reserved -= from_rsvd;
}
}
@@ -360,17 +374,36 @@ static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
struct mlx4_priv *priv = mlx4_priv(dev);
struct resource_allocator *res_alloc =
&priv->mfunc.master.res_tracker.res_alloc[res_type];
+ int allocated, guaranteed, from_rsvd;
if (slave > dev->num_vfs)
return;
spin_lock(&res_alloc->alloc_lock);
+
+ allocated = (port > 0) ?
+ res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] :
+ res_alloc->allocated[slave];
+ guaranteed = res_alloc->guaranteed[slave];
+
+ if (allocated - count >= guaranteed) {
+ from_rsvd = 0;
+ } else {
+ /* portion may need to be returned to reserved area */
+ if (allocated - guaranteed > 0)
+ from_rsvd = count - (allocated - guaranteed);
+ else
+ from_rsvd = count;
+ }
+
if (port > 0) {
res_alloc->allocated[(port - 1) * (dev->num_vfs + 1) + slave] -= count;
res_alloc->res_port_free[port - 1] += count;
+ res_alloc->res_port_rsvd[port - 1] += from_rsvd;
} else {
res_alloc->allocated[slave] -= count;
res_alloc->res_free += count;
+ res_alloc->res_reserved += from_rsvd;
}
spin_unlock(&res_alloc->alloc_lock);
@@ -963,7 +996,7 @@ static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
ret = alloc_srq_tr(id);
break;
case RES_MAC:
- printk(KERN_ERR "implementation missing\n");
+ pr_err("implementation missing\n");
return NULL;
case RES_COUNTER:
ret = alloc_counter_tr(id);
@@ -1057,10 +1090,10 @@ static int remove_mtt_ok(struct res_mtt *res, int order)
{
if (res->com.state == RES_MTT_BUSY ||
atomic_read(&res->ref_count)) {
- printk(KERN_DEBUG "%s-%d: state %s, ref_count %d\n",
- __func__, __LINE__,
- mtt_states_str(res->com.state),
- atomic_read(&res->ref_count));
+ pr_devel("%s-%d: state %s, ref_count %d\n",
+ __func__, __LINE__,
+ mtt_states_str(res->com.state),
+ atomic_read(&res->ref_count));
return -EBUSY;
} else if (res->com.state != RES_MTT_ALLOCATED)
return -EPERM;
@@ -3897,7 +3930,7 @@ static int add_eth_header(struct mlx4_dev *dev, int slave,
}
}
if (!be_mac) {
- pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d .\n",
+ pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
port);
return -EINVAL;
}
@@ -3994,7 +4027,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
err = get_res(dev, slave, qpn, RES_QP, &rqp);
if (err) {
- pr_err("Steering rule with qpn 0x%x rejected.\n", qpn);
+ pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
return err;
}
rule_header = (struct _rule_hw *)(ctrl + 1);
@@ -4012,7 +4045,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
case MLX4_NET_TRANS_RULE_ID_IPV4:
case MLX4_NET_TRANS_RULE_ID_TCP:
case MLX4_NET_TRANS_RULE_ID_UDP:
- pr_warn("Can't attach FS rule without L2 headers, adding L2 header.\n");
+ pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
err = -EINVAL;
goto err_put;
@@ -4021,7 +4054,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
break;
default:
- pr_err("Corrupted mailbox.\n");
+ pr_err("Corrupted mailbox\n");
err = -EINVAL;
goto err_put;
}
@@ -4035,7 +4068,7 @@ int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
if (err) {
- mlx4_err(dev, "Fail to add flow steering resources.\n ");
+ mlx4_err(dev, "Fail to add flow steering resources\n");
/* detach rule*/
mlx4_cmd(dev, vhcr->out_param, 0, 0,
MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
@@ -4073,7 +4106,7 @@ int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
if (err) {
- mlx4_err(dev, "Fail to remove flow steering resources.\n ");
+ mlx4_err(dev, "Fail to remove flow steering resources\n");
goto out;
}
@@ -4151,7 +4184,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave,
if (print)
mlx4_dbg(dev,
"%s id 0x%llx is busy\n",
- ResourceType(type),
+ resource_str(type),
r->res_id);
++busy;
} else {
@@ -4202,8 +4235,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_QP);
if (err)
- mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy"
- "for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
@@ -4241,10 +4274,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_qps: failed"
- " to move slave %d qpn %d to"
- " reset\n", slave,
- qp->local_qpn);
+ mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
+ slave, qp->local_qpn);
atomic_dec(&qp->rcq->ref_count);
atomic_dec(&qp->scq->ref_count);
atomic_dec(&qp->mtt->ref_count);
@@ -4278,8 +4309,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_SRQ);
if (err)
- mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
@@ -4309,9 +4340,7 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_srqs: failed"
- " to move slave %d srq %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
slave, srqn);
atomic_dec(&srq->mtt->ref_count);
@@ -4346,8 +4375,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_CQ);
if (err)
- mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
@@ -4377,9 +4406,7 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_cqs: failed"
- " to move slave %d cq %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
slave, cqn);
atomic_dec(&cq->mtt->ref_count);
state = RES_CQ_ALLOCATED;
@@ -4411,8 +4438,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_MPT);
if (err)
- mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
@@ -4447,9 +4474,7 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_mrs: failed"
- " to move slave %d mpt %d to"
- " SW ownership\n",
+ mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
slave, mptn);
if (mpt->mtt)
atomic_dec(&mpt->mtt->ref_count);
@@ -4481,8 +4506,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_MTT);
if (err)
- mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
@@ -4584,8 +4609,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_EQ);
if (err)
- mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
@@ -4617,9 +4642,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err)
- mlx4_dbg(dev, "rem_slave_eqs: failed"
- " to move slave %d eqs %d to"
- " SW ownership\n", slave, eqn);
+ mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
+ slave, eqn);
mlx4_free_cmd_mailbox(dev, mailbox);
atomic_dec(&eq->mtt->ref_count);
state = RES_EQ_RESERVED;
@@ -4648,8 +4672,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_COUNTER);
if (err)
- mlx4_warn(dev, "rem_slave_counters: Could not move all counters to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
@@ -4679,8 +4703,8 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
err = move_all_busy(dev, slave, RES_XRCD);
if (err)
- mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns to "
- "busy for slave %d\n", slave);
+ mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
+ slave);
spin_lock_irq(mlx4_tlock(dev));
list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
@@ -4825,10 +4849,8 @@ void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
0, MLX4_CMD_UPDATE_QP,
MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
if (err) {
- mlx4_info(dev, "UPDATE_QP failed for slave %d, "
- "port %d, qpn %d (%d)\n",
- work->slave, port, qp->local_qpn,
- err);
+ mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
+ work->slave, port, qp->local_qpn, err);
errors++;
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 405c4fb..87d1b01 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -620,8 +620,8 @@ static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
mlx5_command_str(msg_to_opcode(ent->in)),
msg_to_opcode(ent->in));
}
- mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n", err,
- deliv_status_to_str(ent->status), ent->status);
+ mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
+ err, deliv_status_to_str(ent->status), ent->status);
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 64a61b2..7f39ebc 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -208,7 +208,8 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
*/
rmb();
- mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", eq->eqn, eqe_type_str(eqe->type));
+ mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n",
+ eq->eqn, eqe_type_str(eqe->type));
switch (eqe->type) {
case MLX5_EVENT_TYPE_COMP:
cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff;
@@ -270,14 +271,16 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id);
s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages);
- mlx5_core_dbg(dev, "page request for func 0x%x, napges %d\n", func_id, npages);
+ mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
+ func_id, npages);
mlx5_core_req_pages_handler(dev, func_id, npages);
}
break;
default:
- mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", eqe->type, eq->eqn);
+ mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
+ eqe->type, eq->eqn);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index c3eee5f..ee24f13 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -66,10 +66,10 @@ static int set_dma_caps(struct pci_dev *pdev)
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
- dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
+ dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
- dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
+ dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
return err;
}
}
@@ -77,11 +77,11 @@ static int set_dma_caps(struct pci_dev *pdev)
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
dev_warn(&pdev->dev,
- "Warning: couldn't set 64-bit consistent PCI DMA mask.\n");
+ "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev,
- "Can't set consistent PCI DMA mask, aborting.\n");
+ "Can't set consistent PCI DMA mask, aborting\n");
return err;
}
}
@@ -95,7 +95,7 @@ static int request_bar(struct pci_dev *pdev)
int err = 0;
if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
- dev_err(&pdev->dev, "Missing registers BAR, aborting.\n");
+ dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
return -ENODEV;
}
@@ -319,13 +319,13 @@ int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
err = pci_enable_device(pdev);
if (err) {
- dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+ dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
goto err_dbg;
}
err = request_bar(pdev);
if (err) {
- dev_err(&pdev->dev, "error requesting BARs, aborting.\n");
+ dev_err(&pdev->dev, "error requesting BARs, aborting\n");
goto err_disable;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 68b74e1..f0c9f9a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -39,24 +39,26 @@
extern int mlx5_core_debug_mask;
-#define mlx5_core_dbg(dev, format, arg...) \
-pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
- current->pid, ##arg)
+#define mlx5_core_dbg(dev, format, ...) \
+ pr_debug("%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
-#define mlx5_core_dbg_mask(dev, mask, format, arg...) \
-do { \
- if ((mask) & mlx5_core_debug_mask) \
- pr_debug("%s:%s:%d:(pid %d): " format, (dev)->priv.name, \
- __func__, __LINE__, current->pid, ##arg); \
+#define mlx5_core_dbg_mask(dev, mask, format, ...) \
+do { \
+ if ((mask) & mlx5_core_debug_mask) \
+ mlx5_core_dbg(dev, format, ##__VA_ARGS__); \
} while (0)
-#define mlx5_core_err(dev, format, arg...) \
-pr_err("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
- current->pid, ##arg)
+#define mlx5_core_err(dev, format, ...) \
+ pr_err("%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
-#define mlx5_core_warn(dev, format, arg...) \
-pr_warn("%s:%s:%d:(pid %d): " format, (dev)->priv.name, __func__, __LINE__, \
- current->pid, ##arg)
+#define mlx5_core_warn(dev, format, ...) \
+ pr_warn("%s:%s:%d:(pid %d): " format, \
+ (dev)->priv.name, __func__, __LINE__, current->pid, \
+ ##__VA_ARGS__)
enum {
MLX5_CMD_DATA, /* print command payload only */
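
Note: the mlx5_core.h rework above replaces the GNU-only named variadic form (`arg...` with `##arg`) with standard C99 `...` and `__VA_ARGS__`, and makes mlx5_core_dbg_mask() reuse mlx5_core_dbg() instead of duplicating the pr_debug() call. The `##` before `__VA_ARGS__` is still a GNU/Clang extension: it swallows the preceding comma when the macro is invoked with no variadic arguments. A standalone illustration:

	#include <stdio.h>

	/* ##__VA_ARGS__ drops the trailing comma when no extra args are given. */
	#define log_dbg(fmt, ...) \
		printf("%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__)

	int main(void)
	{
		log_dbg("no arguments\n");	/* comma elided by ## */
		log_dbg("err %d\n", -22);	/* normal expansion */
		return 0;
	}
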
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index ac52a0f..ba0401d4 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -73,7 +73,7 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
}
if (err) {
- mlx5_core_dbg(dev, "cmd exec faile %d\n", err);
+ mlx5_core_dbg(dev, "cmd exec failed %d\n", err);
return err;
}
@@ -195,7 +195,8 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
}
if (out.hdr.status) {
- mlx5_core_err(dev, "create_psv bad status %d\n", out.hdr.status);
+ mlx5_core_err(dev, "create_psv bad status %d\n",
+ out.hdr.status);
return mlx5_cmd_status_to_err(&out.hdr);
}
@@ -224,7 +225,8 @@ int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num)
}
if (out.hdr.status) {
- mlx5_core_err(dev, "destroy_psv bad status %d\n", out.hdr.status);
+ mlx5_core_err(dev, "destroy_psv bad status %d\n",
+ out.hdr.status);
err = mlx5_cmd_status_to_err(&out.hdr);
goto out;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
index d59790a..c2a953e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
@@ -311,7 +311,8 @@ retry:
in->num_entries = cpu_to_be32(npages);
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n", func_id, npages, err);
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
+ func_id, npages, err);
goto out_alloc;
}
dev->priv.fw_pages += npages;
@@ -319,7 +320,8 @@ retry:
if (out.hdr.status) {
err = mlx5_cmd_status_to_err(&out.hdr);
if (err) {
- mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n", func_id, npages, out.hdr.status);
+ mlx5_core_warn(dev, "func_id 0x%x, npages %d, status %d\n",
+ func_id, npages, out.hdr.status);
goto out_alloc;
}
}
@@ -378,7 +380,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
err = mlx5_cmd_exec(dev, &in, sizeof(in), out, outlen);
if (err) {
- mlx5_core_err(dev, "failed recliaming pages\n");
+ mlx5_core_err(dev, "failed reclaiming pages\n");
goto out_free;
}
dev->priv.fw_pages -= npages;
@@ -414,8 +416,8 @@ static void pages_work_handler(struct work_struct *work)
err = give_pages(dev, req->func_id, req->npages, 1);
if (err)
- mlx5_core_warn(dev, "%s fail %d\n", req->npages < 0 ?
- "reclaim" : "give", err);
+ mlx5_core_warn(dev, "%s fail %d\n",
+ req->npages < 0 ? "reclaim" : "give", err);
kfree(req);
}
@@ -487,7 +489,8 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
optimal_reclaimed_pages(),
&nclaimed);
if (err) {
- mlx5_core_warn(dev, "failed reclaiming pages (%d)\n", err);
+ mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
+ err);
return err;
}
if (nclaimed)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
index 5105762..8145b46 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c
@@ -79,7 +79,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err = mlx5_cmd_exec(dev, in, inlen, &out, sizeof(out));
if (err) {
- mlx5_core_warn(dev, "ret %d", err);
+ mlx5_core_warn(dev, "ret %d\n", err);
return err;
}
@@ -96,7 +96,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
err = radix_tree_insert(&table->tree, qp->qpn, qp);
spin_unlock_irq(&table->lock);
if (err) {
- mlx5_core_warn(dev, "err %d", err);
+ mlx5_core_warn(dev, "err %d\n", err);
goto err_cmd;
}
diff --git a/drivers/net/ethernet/micrel/ks8695net.c b/drivers/net/ethernet/micrel/ks8695net.c
index 16435b3..6c7c78ba 100644
--- a/drivers/net/ethernet/micrel/ks8695net.c
+++ b/drivers/net/ethernet/micrel/ks8695net.c
@@ -1504,15 +1504,15 @@ ks8695_probe(struct platform_device *pdev)
if (ksp->phyiface_regs && ksp->link_irq == -1) {
ks8695_init_switch(ksp);
ksp->dtype = KS8695_DTYPE_LAN;
- SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+ ndev->ethtool_ops = &ks8695_ethtool_ops;
} else if (ksp->phyiface_regs && ksp->link_irq != -1) {
ks8695_init_wan_phy(ksp);
ksp->dtype = KS8695_DTYPE_WAN;
- SET_ETHTOOL_OPS(ndev, &ks8695_wan_ethtool_ops);
+ ndev->ethtool_ops = &ks8695_wan_ethtool_ops;
} else {
/* No initialisation since HPNA does not have a PHY */
ksp->dtype = KS8695_DTYPE_HPNA;
- SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
+ ndev->ethtool_ops = &ks8695_ethtool_ops;
}
/* And bring up the net_device with the net core */
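
Note: the ks8695 hunk above is one instance of a tree-wide cleanup carried by this pull: SET_ETHTOOL_OPS() is dropped in favor of assigning net_device::ethtool_ops directly. The macro was a trivial wrapper, essentially:

	/* Sketch of the old wrapper from <linux/netdevice.h>: */
	#define SET_ETHTOOL_OPS(netdev, ops) \
		((netdev)->ethtool_ops = (ops))

The same substitution recurs below in ks8851, ksz884x, enc28j60, myri10ge, natsemi, ns83820, s2io, vxge, forcedeth, hamachi, yellowfin and the QLogic drivers.
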
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index e0c92e0..66d4ab7 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -26,6 +26,8 @@
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include "ks8851.h"
@@ -85,6 +87,8 @@ union ks8851_tx_hdr {
* @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
* @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
* @vdd_reg: Optional regulator supplying the chip
+ * @vdd_io: Optional digital power supply for IO
+ * @gpio: Optional reset_n gpio
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -133,6 +137,8 @@ struct ks8851_net {
struct eeprom_93cx6 eeprom;
struct regulator *vdd_reg;
+ struct regulator *vdd_io;
+ int gpio;
};
static int msg_enable;
@@ -1404,6 +1410,7 @@ static int ks8851_probe(struct spi_device *spi)
struct ks8851_net *ks;
int ret;
unsigned cider;
+ int gpio;
ndev = alloc_etherdev(sizeof(struct ks8851_net));
if (!ndev)
@@ -1417,20 +1424,53 @@ static int ks8851_probe(struct spi_device *spi)
ks->spidev = spi;
ks->tx_space = 6144;
- ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
- if (IS_ERR(ks->vdd_reg)) {
- ret = PTR_ERR(ks->vdd_reg);
- if (ret == -EPROBE_DEFER)
- goto err_reg;
- } else {
- ret = regulator_enable(ks->vdd_reg);
+ gpio = of_get_named_gpio_flags(spi->dev.of_node, "reset-gpios",
+ 0, NULL);
+ if (gpio == -EPROBE_DEFER) {
+ ret = gpio;
+ goto err_gpio;
+ }
+
+ ks->gpio = gpio;
+ if (gpio_is_valid(gpio)) {
+ ret = devm_gpio_request_one(&spi->dev, gpio,
+ GPIOF_OUT_INIT_LOW, "ks8851_rst_n");
if (ret) {
- dev_err(&spi->dev, "regulator enable fail: %d\n",
- ret);
- goto err_reg_en;
+ dev_err(&spi->dev, "reset gpio request failed\n");
+ goto err_gpio;
}
}
+ ks->vdd_io = devm_regulator_get(&spi->dev, "vdd-io");
+ if (IS_ERR(ks->vdd_io)) {
+ ret = PTR_ERR(ks->vdd_io);
+ goto err_reg_io;
+ }
+
+ ret = regulator_enable(ks->vdd_io);
+ if (ret) {
+ dev_err(&spi->dev, "regulator vdd_io enable fail: %d\n",
+ ret);
+ goto err_reg_io;
+ }
+
+ ks->vdd_reg = devm_regulator_get(&spi->dev, "vdd");
+ if (IS_ERR(ks->vdd_reg)) {
+ ret = PTR_ERR(ks->vdd_reg);
+ goto err_reg;
+ }
+
+ ret = regulator_enable(ks->vdd_reg);
+ if (ret) {
+ dev_err(&spi->dev, "regulator vdd enable fail: %d\n",
+ ret);
+ goto err_reg;
+ }
+
+ if (gpio_is_valid(gpio)) {
+ usleep_range(10000, 11000);
+ gpio_set_value(gpio, 1);
+ }
mutex_init(&ks->lock);
spin_lock_init(&ks->statelock);
@@ -1471,7 +1511,7 @@ static int ks8851_probe(struct spi_device *spi)
skb_queue_head_init(&ks->txq);
- SET_ETHTOOL_OPS(ndev, &ks8851_ethtool_ops);
+ ndev->ethtool_ops = &ks8851_ethtool_ops;
SET_NETDEV_DEV(ndev, &spi->dev);
spi_set_drvdata(spi, ks);
@@ -1527,13 +1567,14 @@ err_netdev:
free_irq(ndev->irq, ks);
err_irq:
+ if (gpio_is_valid(gpio))
+ gpio_set_value(gpio, 0);
err_id:
- if (!IS_ERR(ks->vdd_reg))
- regulator_disable(ks->vdd_reg);
-err_reg_en:
- if (!IS_ERR(ks->vdd_reg))
- regulator_put(ks->vdd_reg);
+ regulator_disable(ks->vdd_reg);
err_reg:
+ regulator_disable(ks->vdd_io);
+err_reg_io:
+err_gpio:
free_netdev(ndev);
return ret;
}
@@ -1547,18 +1588,24 @@ static int ks8851_remove(struct spi_device *spi)
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
- if (!IS_ERR(priv->vdd_reg)) {
- regulator_disable(priv->vdd_reg);
- regulator_put(priv->vdd_reg);
- }
+ if (gpio_is_valid(priv->gpio))
+ gpio_set_value(priv->gpio, 0);
+ regulator_disable(priv->vdd_reg);
+ regulator_disable(priv->vdd_io);
free_netdev(priv->netdev);
return 0;
}
+static const struct of_device_id ks8851_match_table[] = {
+ { .compatible = "micrel,ks8851" },
+ { }
+};
+
static struct spi_driver ks8851_driver = {
.driver = {
.name = "ks8851",
+ .of_match_table = ks8851_match_table,
.owner = THIS_MODULE,
.pm = &ks8851_pm_ops,
},
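
Note: the ks8851 changes above add an optional reset GPIO (taken from the reset-gpios DT property), a second supply (vdd-io) next to vdd, and an OF match table so the driver can bind via device tree. Both regulators now come from devm_regulator_get(), so the error paths no longer need regulator_put(); only regulator_enable() still requires an explicit regulator_disable(). A minimal sketch of that managed pattern (function name illustrative):

	#include <linux/regulator/consumer.h>
	#include <linux/spi/spi.h>

	static int example_probe(struct spi_device *spi)
	{
		struct regulator *vdd;
		int ret;

		/* Released automatically on probe failure or unbind: no put(). */
		vdd = devm_regulator_get(&spi->dev, "vdd");
		if (IS_ERR(vdd))
			return PTR_ERR(vdd);

		ret = regulator_enable(vdd);	/* pair with regulator_disable() */
		if (ret)
			dev_err(&spi->dev, "regulator enable failed: %d\n", ret);
		return ret;
	}
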
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index 14ac0e2..064a48d 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -4930,7 +4930,7 @@ static void netdev_tx_timeout(struct net_device *dev)
* Only reset the hardware if time between calls is long
* enough.
*/
- if (jiffies - last_reset <= dev->watchdog_timeo)
+ if (time_before_eq(jiffies, last_reset + dev->watchdog_timeo))
hw_priv = NULL;
}
@@ -7072,6 +7072,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
dev = alloc_etherdev(sizeof(struct dev_priv));
if (!dev)
goto pcidev_init_reg_err;
+ SET_NETDEV_DEV(dev, &pdev->dev);
info->netdev[i] = dev;
priv = netdev_priv(dev);
@@ -7106,7 +7107,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id)
}
dev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
if (register_netdev(dev))
goto pcidev_init_reg_err;
port_set_power_saving(port, true);
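
Note: the netdev_tx_timeout() hunk above converts an open-coded jiffies comparison to time_before_eq(). Comparisons like `jiffies - last_reset <= dev->watchdog_timeo` go wrong when the jiffies counter wraps; the time_*() macros from <linux/jiffies.h> do the subtraction in signed arithmetic so the result stays correct across a wrap. Roughly (a sketch of the idea, not the kernel's exact macro, which also type-checks its operands):

	/* True when a is at or before b, tolerating counter wraparound. */
	#define time_before_eq(a, b)	((long)((a) - (b)) <= 0)

The same conversion shows up in vxge-main.c and qlcnic_hw.c further down.
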
diff --git a/drivers/net/ethernet/microchip/enc28j60.c b/drivers/net/ethernet/microchip/enc28j60.c
index c7b40aa..b1b5f66 100644
--- a/drivers/net/ethernet/microchip/enc28j60.c
+++ b/drivers/net/ethernet/microchip/enc28j60.c
@@ -1593,7 +1593,7 @@ static int enc28j60_probe(struct spi_device *spi)
dev->irq = spi->irq;
dev->netdev_ops = &enc28j60_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &enc28j60_ethtool_ops);
+ dev->ethtool_ops = &enc28j60_ethtool_ops;
enc28j60_lowpower(priv, true);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 130f6b2..f3d5d79 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4112,7 +4112,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
setup_timer(&mgp->watchdog_timer, myri10ge_watchdog_timer,
(unsigned long)mgp);
- SET_ETHTOOL_OPS(netdev, &myri10ge_ethtool_ops);
+ netdev->ethtool_ops = &myri10ge_ethtool_ops;
INIT_WORK(&mgp->watchdog_work, myri10ge_watchdog);
status = register_netdev(netdev);
if (status != 0) {
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 64ec2a43..291fba8 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -927,7 +927,7 @@ static int natsemi_probe1(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->netdev_ops = &natsemi_netdev_ops;
dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
if (mtu)
dev->mtu = mtu;
diff --git a/drivers/net/ethernet/natsemi/ns83820.c b/drivers/net/ethernet/natsemi/ns83820.c
index dbccf1d..19bb824 100644
--- a/drivers/net/ethernet/natsemi/ns83820.c
+++ b/drivers/net/ethernet/natsemi/ns83820.c
@@ -2030,7 +2030,7 @@ static int ns83820_init_one(struct pci_dev *pci_dev,
pci_dev->subsystem_vendor, pci_dev->subsystem_device);
ndev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ops);
+ ndev->ethtool_ops = &ops;
ndev->watchdog_timeo = 5 * HZ;
pci_set_drvdata(pci_dev, ndev);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index a2844ff..be58764 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -534,15 +534,6 @@ static inline void s2io_start_all_tx_queue(struct s2io_nic *sp)
netif_tx_start_all_queues(sp->dev);
}
-static inline void s2io_start_tx_queue(struct s2io_nic *sp, int fifo_no)
-{
- if (!sp->config.multiq)
- sp->mac_control.fifos[fifo_no].queue_state =
- FIFO_QUEUE_START;
-
- netif_tx_start_all_queues(sp->dev);
-}
-
static inline void s2io_wake_all_tx_queue(struct s2io_nic *sp)
{
if (!sp->config.multiq) {
@@ -5369,8 +5360,8 @@ static int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
ethtool_cmd_speed_set(info, SPEED_10000);
info->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(info, -1);
- info->duplex = -1;
+ ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
+ info->duplex = DUPLEX_UNKNOWN;
}
info->autoneg = AUTONEG_DISABLE;
@@ -7919,7 +7910,7 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
/* Driver entry points */
dev->netdev_ops = &s2io_netdev_ops;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ dev->ethtool_ops = &netdev_ethtool_ops;
dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
NETIF_F_TSO | NETIF_F_TSO6 |
NETIF_F_RXCSUM | NETIF_F_LRO;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
index 089b713..2bbd01f 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
@@ -120,7 +120,6 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
{
u64 val64;
u32 i = 0;
- enum vxge_hw_status ret = VXGE_HW_FAIL;
udelay(10);
@@ -139,7 +138,7 @@ __vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
mdelay(1);
} while (++i <= max_millis);
- return ret;
+ return VXGE_HW_FAIL;
}
static inline enum vxge_hw_status
@@ -1682,12 +1681,10 @@ enum vxge_hw_status vxge_hw_driver_stats_get(
struct __vxge_hw_device *hldev,
struct vxge_hw_device_stats_sw_info *sw_stats)
{
- enum vxge_hw_status status = VXGE_HW_OK;
-
memcpy(sw_stats, &hldev->stats.sw_dev_info_stats,
sizeof(struct vxge_hw_device_stats_sw_info));
- return status;
+ return VXGE_HW_OK;
}
/*
@@ -3228,7 +3225,6 @@ enum vxge_hw_status
vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
{
struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
- enum vxge_hw_status status = VXGE_HW_OK;
int i = 0, j = 0;
for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
@@ -3241,7 +3237,7 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
return VXGE_HW_FAIL;
}
}
- return status;
+ return VXGE_HW_OK;
}
/*
* vxge_hw_mgmt_reg_Write - Write Titan register.
@@ -3979,7 +3975,6 @@ __vxge_hw_vpath_mgmt_read(
{
u32 i, mtu = 0, max_pyld = 0;
u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
for (i = 0; i < VXGE_HW_MAC_MAX_MAC_PORT_ID; i++) {
@@ -4009,7 +4004,7 @@ __vxge_hw_vpath_mgmt_read(
else
VXGE_HW_DEVICE_LINK_STATE_SET(vpath->hldev, VXGE_HW_LINK_DOWN);
- return status;
+ return VXGE_HW_OK;
}
/*
@@ -4039,14 +4034,13 @@ static enum vxge_hw_status
__vxge_hw_vpath_reset(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
val64 = VXGE_HW_CMN_RSTHDLR_CFG0_SW_RESET_VPATH(1 << (16 - vp_id));
__vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
&hldev->common_reg->cmn_rsthdlr_cfg0);
- return status;
+ return VXGE_HW_OK;
}
/*
@@ -4227,7 +4221,6 @@ static enum vxge_hw_status
__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vp_config *vp_config;
struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -4283,7 +4276,7 @@ __vxge_hw_vpath_mac_configure(struct __vxge_hw_device *hldev, u32 vp_id)
writeq(val64, &vp_reg->rxmac_vcfg1);
}
- return status;
+ return VXGE_HW_OK;
}
/*
@@ -4295,7 +4288,6 @@ static enum vxge_hw_status
__vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
{
u64 val64;
- enum vxge_hw_status status = VXGE_HW_OK;
struct __vxge_hw_virtualpath *vpath;
struct vxge_hw_vpath_reg __iomem *vp_reg;
struct vxge_hw_vp_config *config;
@@ -4545,7 +4537,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
writeq(val64, &vp_reg->tim_wrkld_clc);
- return status;
+ return VXGE_HW_OK;
}
/*
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
index f8f0738..b07d552 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-ethtool.c
@@ -62,8 +62,8 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
ethtool_cmd_speed_set(info, SPEED_10000);
info->duplex = DUPLEX_FULL;
} else {
- ethtool_cmd_speed_set(info, -1);
- info->duplex = -1;
+ ethtool_cmd_speed_set(info, SPEED_UNKNOWN);
+ info->duplex = DUPLEX_UNKNOWN;
}
info->autoneg = AUTONEG_DISABLE;
@@ -1128,5 +1128,5 @@ static const struct ethtool_ops vxge_ethtool_ops = {
void vxge_initialize_ethtool_ops(struct net_device *ndev)
{
- SET_ETHTOOL_OPS(ndev, &vxge_ethtool_ops);
+ ndev->ethtool_ops = &vxge_ethtool_ops;
}
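
Note: the s2io and vxge ethtool hunks (and forcedeth and pch_gbe below) stop writing a raw -1 into the speed and duplex fields and use the named uapi constants instead. The wire values do not change: SPEED_UNKNOWN is defined as -1, and DUPLEX_UNKNOWN as 0xff, which is exactly what -1 truncates to in the u8 duplex field. For reference, the speed setter splits the 32-bit value across the legacy speed/speed_hi pair (a sketch of the <uapi/linux/ethtool.h> helper):

	static inline void ethtool_cmd_speed_set(struct ethtool_cmd *ep, __u32 speed)
	{
		ep->speed = (__u16)speed;		/* low 16 bits */
		ep->speed_hi = (__u16)(speed >> 16);	/* high 16 bits */
	}
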
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index d107bcb..7a0dead 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2122,7 +2122,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
{
fifo->interrupt_count++;
- if (jiffies > fifo->jiffies + HZ / 100) {
+ if (time_before(fifo->jiffies + HZ / 100, jiffies)) {
struct __vxge_hw_fifo *hw_fifo = fifo->handle;
fifo->jiffies = jiffies;
@@ -2150,7 +2150,7 @@ static void adaptive_coalesce_tx_interrupts(struct vxge_fifo *fifo)
static void adaptive_coalesce_rx_interrupts(struct vxge_ring *ring)
{
ring->interrupt_count++;
- if (jiffies > ring->jiffies + HZ / 100) {
+ if (time_before(ring->jiffies + HZ / 100, jiffies)) {
struct __vxge_hw_ring *hw_ring = ring->handle;
ring->jiffies = jiffies;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index fddb464..9afc536 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -406,7 +406,7 @@ union ring_type {
#define NV_RX_DESCRIPTORVALID (1<<16)
#define NV_RX_MISSEDFRAME (1<<17)
-#define NV_RX_SUBSTRACT1 (1<<18)
+#define NV_RX_SUBTRACT1 (1<<18)
#define NV_RX_ERROR1 (1<<23)
#define NV_RX_ERROR2 (1<<24)
#define NV_RX_ERROR3 (1<<25)
@@ -423,7 +423,7 @@ union ring_type {
#define NV_RX2_CHECKSUM_IP_TCP (0x14000000)
#define NV_RX2_CHECKSUM_IP_UDP (0x18000000)
#define NV_RX2_DESCRIPTORVALID (1<<29)
-#define NV_RX2_SUBSTRACT1 (1<<25)
+#define NV_RX2_SUBTRACT1 (1<<25)
#define NV_RX2_ERROR1 (1<<18)
#define NV_RX2_ERROR2 (1<<19)
#define NV_RX2_ERROR3 (1<<20)
@@ -2832,7 +2832,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
- if (flags & NV_RX_SUBSTRACT1)
+ if (flags & NV_RX_SUBTRACT1)
len--;
}
/* the rest are hard errors */
@@ -2863,7 +2863,7 @@ static int nv_rx_process(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
- if (flags & NV_RX2_SUBSTRACT1)
+ if (flags & NV_RX2_SUBTRACT1)
len--;
}
/* the rest are hard errors */
@@ -2937,7 +2937,7 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
}
/* framing errors are soft errors */
else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
- if (flags & NV_RX2_SUBSTRACT1)
+ if (flags & NV_RX2_SUBTRACT1)
len--;
}
/* the rest are hard errors */
@@ -4285,8 +4285,8 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
if (np->duplex)
ecmd->duplex = DUPLEX_FULL;
} else {
- speed = -1;
- ecmd->duplex = -1;
+ speed = SPEED_UNKNOWN;
+ ecmd->duplex = DUPLEX_UNKNOWN;
}
ethtool_cmd_speed_set(ecmd, speed);
ecmd->autoneg = np->autoneg;
@@ -5766,7 +5766,7 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
dev->netdev_ops = &nv_netdev_ops_optimized;
netif_napi_add(dev, &np->napi, nv_napi_poll, RX_WORK_PER_LOOP);
- SET_ETHTOOL_OPS(dev, &ops);
+ dev->ethtool_ops = &ops;
dev->watchdog_timeo = NV_WATCHDOG_TIMEO;
pci_set_drvdata(pci_dev, dev);
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 422d9b5..8706c0d 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -1361,7 +1361,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
__lpc_eth_clock_enable(pldat, true);
/* Map IO space */
- pldat->net_base = ioremap(res->start, res->end - res->start + 1);
+ pldat->net_base = ioremap(res->start, resource_size(res));
if (!pldat->net_base) {
dev_err(&pdev->dev, "failed to map registers\n");
ret = -ENOMEM;
@@ -1417,10 +1417,8 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
}
pldat->dma_buff_base_p = dma_handle;
- netdev_dbg(ndev, "IO address start :0x%08x\n",
- res->start);
- netdev_dbg(ndev, "IO address size :%d\n",
- res->end - res->start + 1);
+ netdev_dbg(ndev, "IO address space :%pR\n", res);
+ netdev_dbg(ndev, "IO address size :%d\n", resource_size(res));
netdev_dbg(ndev, "IO address (mapped) :0x%p\n",
pldat->net_base);
netdev_dbg(ndev, "IRQ number :%d\n", ndev->irq);
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
index a588ffd..44c8be1 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig
@@ -4,7 +4,7 @@
config PCH_GBE
tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE"
- depends on PCI && (X86 || COMPILE_TEST)
+ depends on PCI && (X86_32 || COMPILE_TEST)
select MII
select PTP_1588_CLOCK_PCH
---help---
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
index 826f0cc..4fe8ea9 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_ethtool.c
@@ -91,7 +91,7 @@ static int pch_gbe_get_settings(struct net_device *netdev,
ecmd->advertising &= ~(ADVERTISED_TP | ADVERTISED_1000baseT_Half);
if (!netif_carrier_ok(adapter->netdev))
- ethtool_cmd_speed_set(ecmd, -1);
+ ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
return ret;
}
@@ -508,5 +508,5 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
void pch_gbe_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &pch_gbe_ethtool_ops);
+ netdev->ethtool_ops = &pch_gbe_ethtool_ops;
}
diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c
index b6bdeb3..9a997e4 100644
--- a/drivers/net/ethernet/packetengines/hamachi.c
+++ b/drivers/net/ethernet/packetengines/hamachi.c
@@ -724,10 +724,8 @@ static int hamachi_init_one(struct pci_dev *pdev,
/* The Hamachi-specific entries in the device structure. */
dev->netdev_ops = &hamachi_netdev_ops;
- if (chip_tbl[hmp->chip_id].flags & CanHaveMII)
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
- else
- SET_ETHTOOL_OPS(dev, &ethtool_ops_no_mii);
+ dev->ethtool_ops = (chip_tbl[hmp->chip_id].flags & CanHaveMII) ?
+ &ethtool_ops : &ethtool_ops_no_mii;
dev->watchdog_timeo = TX_TIMEOUT;
if (mtu)
dev->mtu = mtu;
diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c
index 9a6cb48..69a8dc0 100644
--- a/drivers/net/ethernet/packetengines/yellowfin.c
+++ b/drivers/net/ethernet/packetengines/yellowfin.c
@@ -472,7 +472,7 @@ static int yellowfin_init_one(struct pci_dev *pdev,
/* The Yellowfin-specific entries in the device structure. */
dev->netdev_ops = &netdev_ops;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
if (mtu)
diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig
index c14bd31..d49cba1 100644
--- a/drivers/net/ethernet/qlogic/Kconfig
+++ b/drivers/net/ethernet/qlogic/Kconfig
@@ -66,6 +66,17 @@ config QLCNIC_VXLAN
Say Y here if you want to enable hardware offload support for
Virtual eXtensible Local Area Network (VXLAN) in the driver.
+config QLCNIC_HWMON
+ bool "QLOGIC QLCNIC 82XX and 83XX family HWMON support"
+ depends on QLCNIC && HWMON && !(QLCNIC=y && HWMON=m)
+ default y
+ ---help---
+ This configuration parameter can be used to read the
+ board temperature in Converged Ethernet devices
+ supported by qlcnic.
+
+ This data is available via the hwmon sysfs interface.
+
config QLGE
tristate "QLogic QLGE 10Gb Ethernet Driver Support"
depends on PCI
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index f09c35d..5bf0581 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -1373,7 +1373,7 @@ netxen_setup_netdev(struct netxen_adapter *adapter,
netxen_nic_change_mtu(netdev, netdev->mtu);
- SET_ETHTOOL_OPS(netdev, &netxen_nic_ethtool_ops);
+ netdev->ethtool_ops = &netxen_nic_ethtool_ops;
netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index 2eabd44..b5d6bc1 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -3838,7 +3838,7 @@ static int ql3xxx_probe(struct pci_dev *pdev,
/* Set driver entry points */
ndev->netdev_ops = &ql3xxx_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ql3xxx_ethtool_ops);
+ ndev->ethtool_ops = &ql3xxx_ethtool_ops;
ndev->watchdog_timeo = 5 * HZ;
netif_napi_add(ndev, &qdev->napi, ql_poll, 64);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f785d01..be618b9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -39,8 +39,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 57
-#define QLCNIC_LINUX_VERSIONID "5.3.57"
+#define _QLCNIC_LINUX_SUBVERSION 60
+#define QLCNIC_LINUX_VERSIONID "5.3.60"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -441,6 +441,8 @@ struct qlcnic_82xx_dump_template_hdr {
u32 rsvd1[0];
};
+#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
+
struct qlcnic_fw_dump {
u8 clr; /* flag to indicate if dump is cleared */
bool enable; /* enable/disable dump */
@@ -537,6 +539,7 @@ struct qlcnic_hardware_context {
u8 phys_port_id[ETH_ALEN];
u8 lb_mode;
u16 vxlan_port;
+ struct device *hwmon_dev;
};
struct qlcnic_adapter_stats {
@@ -1018,6 +1021,8 @@ struct qlcnic_ipaddr {
#define QLCNIC_DEL_VXLAN_PORT 0x200000
#endif
+#define QLCNIC_VLAN_FILTERING 0x800000
+
#define QLCNIC_IS_MSI_FAMILY(adapter) \
((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
#define QLCNIC_IS_TSO_CAPABLE(adapter) \
@@ -1316,6 +1321,7 @@ struct qlcnic_eswitch {
#define QL_STATUS_INVALID_PARAM -1
#define MAX_BW 100 /* % of link speed */
+#define MIN_BW 1 /* % of link speed */
#define MAX_VLAN_ID 4095
#define MIN_VLAN_ID 2
#define DEFAULT_MAC_LEARN 1
@@ -1692,7 +1698,7 @@ int qlcnic_read_mac_addr(struct qlcnic_adapter *);
int qlcnic_setup_netdev(struct qlcnic_adapter *, struct net_device *, int);
void qlcnic_set_netdev_features(struct qlcnic_adapter *,
struct qlcnic_esw_func_cfg *);
-void qlcnic_sriov_vf_schedule_multi(struct net_device *);
+void qlcnic_sriov_vf_set_multi(struct net_device *);
int qlcnic_is_valid_nic_func(struct qlcnic_adapter *, u8);
int qlcnic_get_pci_func_type(struct qlcnic_adapter *, u16, u16 *, u16 *,
u16 *);
@@ -2338,6 +2344,16 @@ static inline bool qlcnic_83xx_vf_check(struct qlcnic_adapter *adapter)
return (device == PCI_DEVICE_ID_QLOGIC_VF_QLE834X) ? true : false;
}
+static inline bool qlcnic_sriov_check(struct qlcnic_adapter *adapter)
+{
+ bool status;
+
+ status = (qlcnic_sriov_pf_check(adapter) ||
+ qlcnic_sriov_vf_check(adapter)) ? true : false;
+
+ return status;
+}
+
static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
{
if (qlcnic_84xx_check(adapter))
@@ -2345,4 +2361,18 @@ static inline u32 qlcnic_get_vnic_func_count(struct qlcnic_adapter *adapter)
else
return QLC_DEFAULT_VNIC_COUNT;
}
+
+#ifdef CONFIG_QLCNIC_HWMON
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *);
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *);
+#else
+static inline void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+static inline void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ return;
+}
+#endif
#endif /* __QLCNIC_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b7cffb4..a4a4ec0 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -33,6 +33,7 @@ static void qlcnic_83xx_get_beacon_state(struct qlcnic_adapter *);
#define RSS_HASHTYPE_IP_TCP 0x3
#define QLC_83XX_FW_MBX_CMD 0
#define QLC_SKIP_INACTIVE_PCI_REGS 7
+#define QLC_MAX_LEGACY_FUNC_SUPP 8
static const struct qlcnic_mailbox_metadata qlcnic_83xx_mbx_tbl[] = {
{QLCNIC_CMD_CONFIGURE_IP_ADDR, 6, 1},
@@ -357,8 +358,15 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
if (!ahw->intr_tbl)
return -ENOMEM;
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ if (adapter->ahw->pci_func >= QLC_MAX_LEGACY_FUNC_SUPP) {
+ dev_err(&adapter->pdev->dev, "PCI function number 8 and higher are not supported with legacy interrupt, func 0x%x\n",
+ ahw->pci_func);
+ return -EOPNOTSUPP;
+ }
+
qlcnic_83xx_enable_legacy(adapter);
+ }
for (i = 0; i < num_msix; i++) {
if (adapter->flags & QLCNIC_MSIX_ENABLED)
@@ -879,6 +887,9 @@ int qlcnic_83xx_alloc_mbx_args(struct qlcnic_cmd_args *mbx,
return 0;
}
}
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid mailbox command opcode 0x%x\n",
+ __func__, type);
return -EINVAL;
}
@@ -3026,19 +3037,18 @@ void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *adapter)
QLCRDX(adapter->ahw, QLC_83XX_DRV_UNLOCK);
}
-int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
u32 *data, u32 count)
{
int i, j, ret = 0;
u32 temp;
- int err = 0;
/* Check alignment */
if (addr & 0xF)
return -EIO;
mutex_lock(&adapter->ahw->mem_lock);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_HI, 0);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_HI, 0);
for (i = 0; i < count; i++, addr += 16) {
if (!((ADDR_IN_RANGE(addr, QLCNIC_ADDR_QDR_NET,
@@ -3049,26 +3059,16 @@ int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *adapter, u64 addr,
return -EIO;
}
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_ADDR_LO, addr);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_LO,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_HI,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_ULO,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_WRTDATA_UHI,
- *data++);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
- QLCNIC_TA_WRITE_ENABLE);
- qlcnic_83xx_wrt_reg_indirect(adapter, QLCNIC_MS_CTRL,
- QLCNIC_TA_WRITE_START);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_ADDR_LO, addr);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_LO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_HI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_ULO, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_WRTDATA_UHI, *data++);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_ENABLE);
+ qlcnic_ind_wr(adapter, QLCNIC_MS_CTRL, QLCNIC_TA_WRITE_START);
for (j = 0; j < MAX_CTL_CHECK; j++) {
- temp = QLCRD32(adapter, QLCNIC_MS_CTRL, &err);
- if (err == -EIO) {
- mutex_unlock(&adapter->ahw->mem_lock);
- return err;
- }
+ temp = qlcnic_ind_rd(adapter, QLCNIC_MS_CTRL);
if ((temp & TA_CTL_BUSY) == 0)
break;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 88d809c..2bf101a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -418,7 +418,6 @@ enum qlcnic_83xx_states {
#define QLC_83XX_GET_FUNC_MODE_FROM_NPAR_INFO(val) (val & 0x80000000)
#define QLC_83XX_GET_LRO_CAPABILITY(val) (val & 0x20)
#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
-#define QLC_83XX_GET_LSO_CAPABILITY(val) (val & 0x40)
#define QLC_83XX_GET_HW_LRO_CAPABILITY(val) (val & 0x400)
#define QLC_83XX_GET_VLAN_ALIGN_CAPABILITY(val) (val & 0x4000)
#define QLC_83XX_GET_FW_LRO_MSS_CAPABILITY(val) (val & 0x20000)
@@ -560,7 +559,7 @@ void qlcnic_83xx_napi_del(struct qlcnic_adapter *);
void qlcnic_83xx_napi_enable(struct qlcnic_adapter *);
void qlcnic_83xx_napi_disable(struct qlcnic_adapter *);
int qlcnic_83xx_config_led(struct qlcnic_adapter *, u32, u32);
-void qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
+int qlcnic_ind_wr(struct qlcnic_adapter *, u32, u32);
int qlcnic_ind_rd(struct qlcnic_adapter *, u32);
int qlcnic_83xx_create_rx_ctx(struct qlcnic_adapter *);
int qlcnic_83xx_create_tx_ctx(struct qlcnic_adapter *,
@@ -617,7 +616,6 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *, u32);
int qlcnic_83xx_lock_driver(struct qlcnic_adapter *);
void qlcnic_83xx_unlock_driver(struct qlcnic_adapter *);
int qlcnic_83xx_set_default_offload_settings(struct qlcnic_adapter *);
-int qlcnic_83xx_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
int qlcnic_83xx_idc_vnic_pf_entry(struct qlcnic_adapter *);
int qlcnic_83xx_disable_vnic_mode(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *);
@@ -659,4 +657,5 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
u32 qlcnic_83xx_get_cap_size(void *, int);
void qlcnic_83xx_set_sys_info(void *, int, u32);
void qlcnic_83xx_store_cap_mask(void *, u32);
+int qlcnic_ms_mem_write128(struct qlcnic_adapter *, u64, u32 *, u32);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index ba20c72..f33559b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1363,8 +1363,8 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
return ret;
}
/* 16 byte write to MS memory */
- ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
- size / 16);
+ ret = qlcnic_ms_mem_write128(adapter, dest, (u32 *)p_cache,
+ size / 16);
if (ret) {
vfree(p_cache);
return ret;
@@ -1389,8 +1389,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
p_cache = (u32 *)fw->data;
addr = (u64)dest;
- ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
- p_cache, size / 16);
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ p_cache, size / 16);
if (ret) {
dev_err(&adapter->pdev->dev, "MS memory write failed\n");
release_firmware(fw);
@@ -1405,8 +1405,8 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
data[i] = fw->data[size + i];
for (; i < 16; i++)
data[i] = 0;
- ret = qlcnic_83xx_ms_mem_write128(adapter, addr,
- (u32 *)data, 1);
+ ret = qlcnic_ms_mem_write128(adapter, addr,
+ (u32 *)data, 1);
if (ret) {
dev_err(&adapter->pdev->dev,
"MS memory write failed\n");
@@ -2181,6 +2181,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
max_sds_rings = QLCNIC_MAX_SDS_RINGS;
max_tx_rings = QLCNIC_MAX_TX_RINGS;
} else {
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode %d\n",
+ __func__, ret);
return -EIO;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index c1e11f5..304e247 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -1027,8 +1027,11 @@ int qlcnic_config_port_mirroring(struct qlcnic_adapter *adapter, u8 id,
u32 arg1;
if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC ||
- !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE))
+ !(adapter->eswitch[id].flags & QLCNIC_SWITCH_ENABLE)) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
return err;
+ }
arg1 = id | (enable_mirroring ? BIT_4 : 0);
arg1 |= pci_func << 8;
@@ -1318,8 +1321,12 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
u32 arg1, arg2 = 0;
u8 pci_func;
- if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC)
+ if (adapter->ahw->op_mode != QLCNIC_MGMT_FUNC) {
+ dev_err(&adapter->pdev->dev, "%s: Not a management function\n",
+ __func__);
return err;
+ }
+
pci_func = esw_cfg->pci_func;
index = qlcnic_is_valid_nic_func(adapter, pci_func);
if (index < 0)
@@ -1363,6 +1370,8 @@ int qlcnic_config_switch_port(struct qlcnic_adapter *adapter,
arg1 &= ~(0x0ffff << 16);
break;
default:
+ dev_err(&adapter->pdev->dev, "%s: Invalid opmode 0x%x\n",
+ __func__, esw_cfg->op_mode);
return err;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 5bacf52..1b7f3db 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -726,6 +726,11 @@ static int qlcnic_set_channels(struct net_device *dev,
struct qlcnic_adapter *adapter = netdev_priv(dev);
int err;
+ if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
+ netdev_err(dev, "No RSS/TSS support in non MSI-X mode\n");
+ return -EINVAL;
+ }
+
if (channel->other_count || channel->combined_count)
return -EINVAL;
@@ -734,7 +739,7 @@ static int qlcnic_set_channels(struct net_device *dev,
if (err)
return err;
- if (channel->rx_count) {
+ if (adapter->drv_sds_rings != channel->rx_count) {
err = qlcnic_validate_rings(adapter, channel->rx_count,
QLCNIC_RX_QUEUE);
if (err) {
@@ -745,7 +750,7 @@ static int qlcnic_set_channels(struct net_device *dev,
adapter->drv_rss_rings = channel->rx_count;
}
- if (channel->tx_count) {
+ if (adapter->drv_tx_rings != channel->tx_count) {
err = qlcnic_validate_rings(adapter, channel->tx_count,
QLCNIC_TX_QUEUE);
if (err) {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 9f3adf4..851cb4a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -373,12 +373,16 @@ int qlcnic_ind_rd(struct qlcnic_adapter *adapter, u32 addr)
return data;
}
-void qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
+int qlcnic_ind_wr(struct qlcnic_adapter *adapter, u32 addr, u32 data)
{
+ int ret = 0;
+
if (qlcnic_82xx_check(adapter))
qlcnic_write_window_reg(addr, adapter->ahw->pci_base0, data);
else
- qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+ ret = qlcnic_83xx_wrt_reg_indirect(adapter, addr, data);
+
+ return ret;
}
static int
@@ -567,28 +571,14 @@ static void __qlcnic_set_multi(struct net_device *netdev, u16 vlan)
void qlcnic_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
- struct qlcnic_mac_vlan_list *cur;
- struct netdev_hw_addr *ha;
- size_t temp;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
return;
- if (qlcnic_sriov_vf_check(adapter)) {
- if (!netdev_mc_empty(netdev)) {
- netdev_for_each_mc_addr(ha, netdev) {
- temp = sizeof(struct qlcnic_mac_vlan_list);
- cur = kzalloc(temp, GFP_ATOMIC);
- if (cur == NULL)
- break;
- memcpy(cur->mac_addr,
- ha->addr, ETH_ALEN);
- list_add_tail(&cur->list, &adapter->vf_mc_list);
- }
- }
- qlcnic_sriov_vf_schedule_multi(adapter->netdev);
- return;
- }
- __qlcnic_set_multi(netdev, 0);
+
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_vf_set_multi(netdev);
+ else
+ __qlcnic_set_multi(netdev, 0);
}
int qlcnic_82xx_nic_set_promisc(struct qlcnic_adapter *adapter, u32 mode)
@@ -630,7 +620,7 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
struct hlist_node *n;
struct hlist_head *head;
int i;
- unsigned long time;
+ unsigned long expires;
u8 cmd;
for (i = 0; i < adapter->fhash.fbucket_size; i++) {
@@ -638,8 +628,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
hlist_for_each_entry_safe(tmp_fil, n, head, fnode) {
cmd = tmp_fil->vlan_id ? QLCNIC_MAC_VLAN_DEL :
QLCNIC_MAC_DEL;
- time = tmp_fil->ftime;
- if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+ expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+ if (time_before(expires, jiffies)) {
qlcnic_sre_macaddr_change(adapter,
tmp_fil->faddr,
tmp_fil->vlan_id,
@@ -657,8 +647,8 @@ void qlcnic_prune_lb_filters(struct qlcnic_adapter *adapter)
hlist_for_each_entry_safe(tmp_fil, n, head, fnode)
{
- time = tmp_fil->ftime;
- if (jiffies > (QLCNIC_FILTER_AGE * HZ + time)) {
+ expires = tmp_fil->ftime + QLCNIC_FILTER_AGE * HZ;
+ if (time_before(expires, jiffies)) {
spin_lock_bh(&adapter->rx_mac_learn_lock);
adapter->rx_fhash.fnum--;
hlist_del(&tmp_fil->fnode);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 173b3d1..e45bf09 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -305,7 +305,6 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
{
struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
- struct net_device *netdev = adapter->netdev;
u16 protocol = ntohs(skb->protocol);
struct qlcnic_filter *fil, *tmp_fil;
struct hlist_head *head;
@@ -314,27 +313,16 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
u16 vlan_id = 0;
u8 hindex, hval;
- if (!qlcnic_sriov_pf_check(adapter)) {
- if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
- return;
- } else {
+ if (ether_addr_equal(phdr->h_source, adapter->mac_addr))
+ return;
+
+ if (adapter->flags & QLCNIC_VLAN_FILTERING) {
if (protocol == ETH_P_8021Q) {
vh = (struct vlan_ethhdr *)skb->data;
vlan_id = ntohs(vh->h_vlan_TCI);
} else if (vlan_tx_tag_present(skb)) {
vlan_id = vlan_tx_tag_get(skb);
}
-
- if (ether_addr_equal(phdr->h_source, adapter->mac_addr) &&
- !vlan_id)
- return;
- }
-
- if (adapter->fhash.fnum >= adapter->fhash.fmax) {
- adapter->stats.mac_filter_limit_overrun++;
- netdev_info(netdev, "Can not add more than %d mac-vlan filters, configured %d\n",
- adapter->fhash.fmax, adapter->fhash.fnum);
- return;
}
memcpy(&src_addr, phdr->h_source, ETH_ALEN);
@@ -353,6 +341,11 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
}
}
+ if (unlikely(adapter->fhash.fnum >= adapter->fhash.fmax)) {
+ adapter->stats.mac_filter_limit_overrun++;
+ return;
+ }
+
fil = kzalloc(sizeof(struct qlcnic_filter), GFP_ATOMIC);
if (!fil)
return;
@@ -1216,8 +1209,7 @@ qlcnic_process_rcv(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (adapter->rx_mac_learn) {
t_vid = 0;
is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
@@ -1293,8 +1285,7 @@ qlcnic_process_lro(struct qlcnic_adapter *adapter,
if (!skb)
return buffer;
- if (adapter->drv_mac_learn &&
- (adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (adapter->rx_mac_learn) {
t_vid = 0;
is_lb_pkt = qlcnic_82xx_is_lb_pkt(sts_data0);
qlcnic_add_lb_filter(adapter, skb, is_lb_pkt, t_vid);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 7e55e88..4fc1867 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -378,7 +378,8 @@ static int qlcnic_fdb_del(struct ndmsg *ndm, struct nlattr *tb[],
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_del(ndm, tb, netdev, addr);
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED) {
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter)) {
if (is_unicast_ether_addr(addr)) {
err = dev_uc_del(netdev, addr);
if (!err)
@@ -402,7 +403,8 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_add(ndm, tb, netdev, addr, flags);
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED)) {
+ if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED) &&
+ !qlcnic_sriov_check(adapter)) {
pr_info("%s: FDB e-switch is not enabled\n", __func__);
return -EOPNOTSUPP;
}
@@ -432,7 +434,8 @@ static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
if (!adapter->fdb_mac_learn)
return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
- if (adapter->flags & QLCNIC_ESWITCH_ENABLED)
+ if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
+ qlcnic_sriov_check(adapter))
idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
return idx;
@@ -522,7 +525,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
#endif
#ifdef CONFIG_QLCNIC_SRIOV
.ndo_set_vf_mac = qlcnic_sriov_set_vf_mac,
- .ndo_set_vf_tx_rate = qlcnic_sriov_set_vf_tx_rate,
+ .ndo_set_vf_rate = qlcnic_sriov_set_vf_tx_rate,
.ndo_get_vf_config = qlcnic_sriov_get_vf_config,
.ndo_set_vf_vlan = qlcnic_sriov_set_vf_vlan,
.ndo_set_vf_spoofchk = qlcnic_sriov_set_vf_spoofchk,
@@ -690,10 +693,10 @@ int qlcnic_setup_tss_rss_intr(struct qlcnic_adapter *adapter)
adapter->msix_entries[vector].entry = vector;
restore:
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err > 0) {
+ err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
+ if (err == -ENOSPC) {
if (!adapter->drv_tss_rings && !adapter->drv_rss_rings)
- return -ENOSPC;
+ return err;
netdev_info(adapter->netdev,
"Unable to allocate %d MSI-X vectors, Available vectors %d\n",
@@ -1014,6 +1017,8 @@ int qlcnic_init_pci_info(struct qlcnic_adapter *adapter)
if (pfn >= ahw->max_vnic_func) {
ret = QL_STATUS_INVALID_PARAM;
+ dev_err(&adapter->pdev->dev, "%s: Invalid function 0x%x, max 0x%x\n",
+ __func__, pfn, ahw->max_vnic_func);
goto err_eswitch;
}
@@ -1915,8 +1920,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (!test_and_clear_bit(__QLCNIC_DEV_UP, &adapter->state))
return;
- if (qlcnic_sriov_vf_check(adapter))
- qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
@@ -1928,6 +1931,8 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
qlcnic_delete_lb_filters(adapter);
qlcnic_nic_set_promisc(adapter, QLCNIC_NIU_NON_PROMISC_MODE);
+ if (qlcnic_sriov_vf_check(adapter))
+ qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
qlcnic_napi_disable(adapter);
@@ -2052,6 +2057,7 @@ out:
static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
int err = 0;
adapter->recv_ctx = kzalloc(sizeof(struct qlcnic_recv_context),
@@ -2061,6 +2067,18 @@ static int qlcnic_alloc_adapter_resources(struct qlcnic_adapter *adapter)
goto err_out;
}
+ if (qlcnic_83xx_check(adapter)) {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX_TX;
+ ahw->coal.tx_time_us = QLCNIC_DEF_INTR_COALESCE_TX_TIME_US;
+ ahw->coal.tx_packets = QLCNIC_DEF_INTR_COALESCE_TX_PACKETS;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ } else {
+ ahw->coal.type = QLCNIC_INTR_COAL_TYPE_RX;
+ ahw->coal.rx_time_us = QLCNIC_DEF_INTR_COALESCE_RX_TIME_US;
+ ahw->coal.rx_packets = QLCNIC_DEF_INTR_COALESCE_RX_PACKETS;
+ }
+
/* clear stats */
memset(&adapter->stats, 0, sizeof(adapter->stats));
err_out:
@@ -2069,12 +2087,20 @@ err_out:
static void qlcnic_free_adapter_resources(struct qlcnic_adapter *adapter)
{
+ struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
+
kfree(adapter->recv_ctx);
adapter->recv_ctx = NULL;
- if (adapter->ahw->fw_dump.tmpl_hdr) {
- vfree(adapter->ahw->fw_dump.tmpl_hdr);
- adapter->ahw->fw_dump.tmpl_hdr = NULL;
+ if (fw_dump->tmpl_hdr) {
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
+ }
+
+ if (fw_dump->dma_buffer) {
+ dma_free_coherent(&adapter->pdev->dev, QLC_PEX_DMA_READ_SIZE,
+ fw_dump->dma_buffer, fw_dump->phys_addr);
+ fw_dump->dma_buffer = NULL;
}
kfree(adapter->ahw->reset.buff);
@@ -2247,10 +2273,8 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
qlcnic_change_mtu(netdev, netdev->mtu);
- if (qlcnic_sriov_vf_check(adapter))
- SET_ETHTOOL_OPS(netdev, &qlcnic_sriov_vf_ethtool_ops);
- else
- SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_ops);
+ netdev->ethtool_ops = (qlcnic_sriov_vf_check(adapter)) ?
+ &qlcnic_sriov_vf_ethtool_ops : &qlcnic_ethtool_ops;
netdev->features |= (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_IPV6_CSUM | NETIF_F_GRO |
@@ -2417,9 +2441,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int err, pci_using_dac = -1;
char board_name[QLCNIC_MAX_BOARD_NAME_LEN + 19]; /* MAC + ": " + name */
- if (pdev->is_virtfn)
- return -ENODEV;
-
err = pci_enable_device(pdev);
if (err)
return err;
@@ -2552,9 +2573,11 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
case -ENOMEM:
dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
goto err_out_free_hw;
+ case -EOPNOTSUPP:
+ dev_err(&pdev->dev, "Adapter initialization failed\n");
+ goto err_out_free_hw;
default:
- dev_err(&pdev->dev, "Adapter initialization failed. A reboot may be required to recover from this failure\n");
- dev_err(&pdev->dev, "If reboot does not help to recover from this failure, try a flash update of the adapter\n");
+ dev_err(&pdev->dev, "Adapter initialization failed. Driver will load in maintenance mode to recover the adapter using the application\n");
goto err_out_maintenance_mode;
}
}
@@ -2628,7 +2651,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
qlcnic_alloc_lb_filters_mem(adapter);
qlcnic_add_sysfs(adapter);
-
+ qlcnic_register_hwmon_dev(adapter);
return 0;
err_out_disable_mbx_intr:
@@ -2665,7 +2688,7 @@ err_out_disable_pdev:
err_out_maintenance_mode:
set_bit(__QLCNIC_MAINTENANCE_MODE, &adapter->state);
netdev->netdev_ops = &qlcnic_netdev_failed_ops;
- SET_ETHTOOL_OPS(netdev, &qlcnic_ethtool_failed_ops);
+ netdev->ethtool_ops = &qlcnic_ethtool_failed_ops;
ahw->port_type = QLCNIC_XGBE;
if (qlcnic_83xx_check(adapter))
@@ -2698,9 +2721,9 @@ static void qlcnic_remove(struct pci_dev *pdev)
return;
netdev = adapter->netdev;
- qlcnic_sriov_pf_disable(adapter);
qlcnic_cancel_idc_work(adapter);
+ qlcnic_sriov_pf_disable(adapter);
ahw = adapter->ahw;
unregister_netdev(netdev);
@@ -2735,6 +2758,8 @@ static void qlcnic_remove(struct pci_dev *pdev)
qlcnic_remove_sysfs(adapter);
+ qlcnic_unregister_hwmon_dev(adapter);
+
qlcnic_cleanup_pci_map(adapter->ahw);
qlcnic_release_firmware(adapter);
@@ -2828,6 +2853,8 @@ static int qlcnic_close(struct net_device *netdev)
return 0;
}
+#define QLCNIC_VF_LB_BUCKET_SIZE 1
+
void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
{
void *head;
@@ -2843,7 +2870,10 @@ void qlcnic_alloc_lb_filters_mem(struct qlcnic_adapter *adapter)
spin_lock_init(&adapter->mac_learn_lock);
spin_lock_init(&adapter->rx_mac_learn_lock);
- if (qlcnic_82xx_check(adapter)) {
+ if (qlcnic_sriov_vf_check(adapter)) {
+ filter_size = QLCNIC_83XX_SRIOV_VF_MAX_MAC - 1;
+ adapter->fhash.fbucket_size = QLCNIC_VF_LB_BUCKET_SIZE;
+ } else if (qlcnic_82xx_check(adapter)) {
filter_size = QLCNIC_LB_MAX_FILTERS;
adapter->fhash.fbucket_size = QLCNIC_LB_BUCKET_SIZE;
} else {
@@ -3973,16 +4003,6 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
strcpy(buf, "Tx");
}
- if (!QLCNIC_IS_MSI_FAMILY(adapter)) {
- netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
- return -EINVAL;
- }
-
- if (adapter->flags & QLCNIC_MSI_ENABLED) {
- netdev_err(netdev, "No RSS/TSS support in MSI mode\n");
- return -EINVAL;
- }
-
if (!is_power_of_2(ring_cnt)) {
netdev_err(netdev, "%s rings value should be a power of 2\n",
buf);
@@ -4122,7 +4142,7 @@ void qlcnic_restore_indev_addr(struct net_device *netdev, unsigned long event)
rcu_read_lock();
for_each_set_bit(vid, adapter->vlans, VLAN_N_VID) {
- dev = __vlan_find_dev_deep(netdev, htons(ETH_P_8021Q), vid);
+ dev = __vlan_find_dev_deep_rcu(netdev, htons(ETH_P_8021Q), vid);
if (!dev)
continue;
qlcnic_config_indev_addr(adapter, dev, event);
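
Note: among the qlcnic_main.c changes above, the interrupt-setup hunk replaces pci_enable_msix() with pci_enable_msix_exact(). The older call could succeed partially, returning a positive count of vectors that would fit and forcing callers into retry loops; the _exact variant either enables exactly the requested number or fails with a negative errno, -ENOSPC when not enough vectors are available. The caller's handling then reduces to a fragment like this (identifiers taken from the hunk):

	err = pci_enable_msix_exact(pdev, adapter->msix_entries, num_msix);
	if (err == -ENOSPC) {
		/* exactly num_msix vectors unavailable: retry with fewer
		 * rings or fall back to MSI / legacy interrupts */
	} else if (err) {
		return err;	/* some other failure, e.g. -EINVAL */
	}
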
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 37b979b..e46fc39 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -238,6 +238,8 @@ void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
hdr->drv_cap_mask = hdr->cap_mask;
fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (hdr->capabilities & BIT_0) ? true : false;
}
inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
@@ -276,6 +278,8 @@ inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
hdr->saved_state[index] = value;
}
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
{
struct qlcnic_83xx_dump_template_hdr *hdr;
@@ -288,6 +292,9 @@ void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
hdr->drv_cap_mask = hdr->cap_mask;
fw_dump->cap_mask = hdr->cap_mask;
+
+ fw_dump->use_pex_dma = (fw_dump->version & 0xfffff) >=
+ QLCNIC_TEMPLATE_VERSION;
}
inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
@@ -653,34 +660,31 @@ out:
#define QLC_DMA_CMD_BUFF_ADDR_HI 4
#define QLC_DMA_CMD_STATUS_CTRL 8
-#define QLC_PEX_DMA_READ_SIZE (PAGE_SIZE * 16)
-
static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
struct __mem *mem)
{
- struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
u32 dma_no, dma_base_addr, temp_addr;
int i, ret, dma_sts;
+ void *tmpl_hdr;
tmpl_hdr = adapter->ahw->fw_dump.tmpl_hdr;
- dma_no = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+ dma_no = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
dma_base_addr = QLC_DMA_REG_BASE_ADDR(dma_no);
temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_LOW;
- ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
- mem->desc_card_addr);
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->desc_card_addr);
if (ret)
return ret;
temp_addr = dma_base_addr + QLC_DMA_CMD_BUFF_ADDR_HI;
- ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr, 0);
+ ret = qlcnic_ind_wr(adapter, temp_addr, 0);
if (ret)
return ret;
temp_addr = dma_base_addr + QLC_DMA_CMD_STATUS_CTRL;
- ret = qlcnic_83xx_wrt_reg_indirect(adapter, temp_addr,
- mem->start_dma_cmd);
+ ret = qlcnic_ind_wr(adapter, temp_addr, mem->start_dma_cmd);
if (ret)
return ret;
@@ -710,15 +714,16 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
u32 temp, dma_base_addr, size = 0, read_size = 0;
struct qlcnic_pex_dma_descriptor *dma_descr;
- struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
dma_addr_t dma_phys_addr;
void *dma_buffer;
+ void *tmpl_hdr;
tmpl_hdr = fw_dump->tmpl_hdr;
/* Check if DMA engine is available */
- temp = tmpl_hdr->saved_state[QLC_83XX_DMA_ENGINE_INDEX];
+ temp = qlcnic_get_saved_state(adapter, tmpl_hdr,
+ QLC_83XX_DMA_ENGINE_INDEX);
dma_base_addr = QLC_DMA_REG_BASE_ADDR(temp);
temp = qlcnic_ind_rd(adapter,
dma_base_addr + QLC_DMA_CMD_STATUS_CTRL);
@@ -764,8 +769,8 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
/* Write DMA descriptor to MS memory*/
temp = sizeof(struct qlcnic_pex_dma_descriptor) / 16;
- *ret = qlcnic_83xx_ms_mem_write128(adapter, mem->desc_card_addr,
- (u32 *)dma_descr, temp);
+ *ret = qlcnic_ms_mem_write128(adapter, mem->desc_card_addr,
+ (u32 *)dma_descr, temp);
if (*ret) {
dev_info(dev, "Failed to write DMA descriptor to MS memory at address 0x%x\n",
mem->desc_card_addr);
@@ -1141,8 +1146,6 @@ free_mem:
return err;
}
-#define QLCNIC_TEMPLATE_VERSION (0x20001)
-
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw;
@@ -1150,6 +1153,7 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
u32 version, csum, *tmp_buf;
u8 use_flash_temp = 0;
u32 temp_size = 0;
+ void *temp_buffer;
int err;
ahw = adapter->ahw;
@@ -1199,16 +1203,23 @@ flash_temp:
qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
+ if (fw_dump->use_pex_dma) {
+ fw_dump->dma_buffer = NULL;
+ temp_buffer = dma_alloc_coherent(&adapter->pdev->dev,
+ QLC_PEX_DMA_READ_SIZE,
+ &fw_dump->phys_addr,
+ GFP_KERNEL);
+ if (!temp_buffer)
+ fw_dump->use_pex_dma = false;
+ else
+ fw_dump->dma_buffer = temp_buffer;
+ }
+
dev_info(&adapter->pdev->dev,
"Default minidump capture mask 0x%x\n",
fw_dump->cap_mask);
- if (qlcnic_83xx_check(adapter) &&
- (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
- fw_dump->use_pex_dma = true;
- else
- fw_dump->use_pex_dma = false;
-
qlcnic_enable_fw_dump_state(adapter);
return 0;
@@ -1224,7 +1235,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
struct device *dev = &adapter->pdev->dev;
struct qlcnic_hardware_context *ahw;
struct qlcnic_dump_entry *entry;
- void *temp_buffer, *tmpl_hdr;
+ void *tmpl_hdr;
u32 ocm_window;
__le32 *buffer;
char mesg[64];
@@ -1268,16 +1279,6 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
- if (fw_dump->use_pex_dma) {
- temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
- &fw_dump->phys_addr,
- GFP_KERNEL);
- if (!temp_buffer)
- fw_dump->use_pex_dma = false;
- else
- fw_dump->dma_buffer = temp_buffer;
- }
-
if (qlcnic_82xx_check(adapter)) {
ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
fw_dump_ops = qlcnic_fw_dump_ops;
@@ -1335,10 +1336,6 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
/* Send a udev event to notify availability of FW dump */
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
- if (fw_dump->use_pex_dma)
- dma_free_coherent(dev, QLC_PEX_DMA_READ_SIZE,
- fw_dump->dma_buffer, fw_dump->phys_addr);
-
return 0;
}
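The hunks above move the PEX DMA bounce-buffer allocation from dump time (qlcnic_dump_fw) to template-fetch time, so use_pex_dma is only left set once the buffer actually exists. A minimal sketch of that allocate-once-with-fallback pattern, assuming the driver's QLC_PEX_DMA_READ_SIZE constant and fw_dump fields; the helper name is illustrative:

#include <linux/dma-mapping.h>

/* Reserve the coherent bounce buffer once, at template-fetch time;
 * on failure simply fall back to the slower indirect-read path.
 */
static void qlc_dump_try_alloc_dma(struct device *dev,
				   struct qlcnic_fw_dump *fw_dump)
{
	fw_dump->dma_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
						 &fw_dump->phys_addr,
						 GFP_KERNEL);
	if (!fw_dump->dma_buffer)
		fw_dump->use_pex_dma = false;	/* not fatal, just slower */
}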
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
index 396bd1f..4677b2e 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov.h
@@ -52,6 +52,7 @@ enum qlcnic_bc_commands {
QLCNIC_BC_CMD_CFG_GUEST_VLAN = 0x3,
};
+#define QLCNIC_83XX_SRIOV_VF_MAX_MAC 2
#define QLC_BC_CMD 1
struct qlcnic_trans_list {
@@ -151,13 +152,14 @@ struct qlcnic_vf_info {
struct qlcnic_trans_list rcv_pend;
struct qlcnic_adapter *adapter;
struct qlcnic_vport *vp;
- struct mutex vlan_list_lock; /* Lock for VLAN list */
+ spinlock_t vlan_list_lock; /* Lock for VLAN list */
};
struct qlcnic_async_work_list {
struct list_head list;
struct work_struct work;
void *ptr;
+ struct qlcnic_cmd_args *cmd;
};
struct qlcnic_back_channel {
@@ -231,7 +233,7 @@ bool qlcnic_sriov_soft_flr_check(struct qlcnic_adapter *,
void qlcnic_sriov_pf_reset(struct qlcnic_adapter *);
int qlcnic_sriov_pf_reinit(struct qlcnic_adapter *);
int qlcnic_sriov_set_vf_mac(struct net_device *, int, u8 *);
-int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int);
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *, int, int, int);
int qlcnic_sriov_get_vf_config(struct net_device *, int ,
struct ifla_vf_info *);
int qlcnic_sriov_set_vf_vlan(struct net_device *, int, u16, u8);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
index 6afe9c1..1659c80 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
@@ -39,6 +39,8 @@ static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *,
+ struct qlcnic_cmd_args *);
static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
.read_crb = qlcnic_83xx_read_crb,
@@ -181,7 +183,7 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
vf->adapter = adapter;
vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
mutex_init(&vf->send_cmd_lock);
- mutex_init(&vf->vlan_list_lock);
+ spin_lock_init(&vf->vlan_list_lock);
INIT_LIST_HEAD(&vf->rcv_act.wait_list);
INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
spin_lock_init(&vf->rcv_act.lock);
@@ -197,8 +199,10 @@ int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
goto qlcnic_destroy_async_wq;
}
sriov->vf_info[i].vp = vp;
+ vp->vlan_mode = QLC_GUEST_VLAN_MODE;
vp->max_tx_bw = MAX_BW;
- vp->spoofchk = true;
+ vp->min_tx_bw = MIN_BW;
+ vp->spoofchk = false;
random_ether_addr(vp->mac);
dev_info(&adapter->pdev->dev,
"MAC Address %pM is configured for VF %d\n",
@@ -454,6 +458,7 @@ static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter)
struct qlcnic_cmd_args cmd;
int ret = 0;
+ memset(&cmd, 0, sizeof(cmd));
ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
if (ret)
return ret;
@@ -515,6 +520,8 @@ static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
{
int err;
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
+ adapter->ahw->total_nic_func = 1;
INIT_LIST_HEAD(&adapter->vf_mc_list);
if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
dev_warn(&adapter->pdev->dev,
@@ -770,6 +777,7 @@ static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
cmd->req.arg = (u32 *)trans->req_pay;
cmd->rsp.arg = (u32 *)trans->rsp_pay;
cmd_op = cmd->req.arg[0] & 0xff;
+ cmd->cmd_op = cmd_op;
remainder = (trans->rsp_pay_size) % (bc_pay_sz);
num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
if (remainder)
@@ -1356,7 +1364,7 @@ static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
return -EIO;
}
-static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+static int __qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -1408,12 +1416,17 @@ retry:
(mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
rsp = QLCNIC_RCODE_SUCCESS;
} else {
- rsp = mbx_err_code;
- if (!rsp)
- rsp = 1;
- dev_err(dev,
- "MBX command 0x%x failed with err:0x%x for VF %d\n",
- opcode, mbx_err_code, func);
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT) {
+ rsp = QLCNIC_RCODE_SUCCESS;
+ } else {
+ rsp = mbx_err_code;
+ if (!rsp)
+ rsp = 1;
+
+ dev_err(dev,
+ "MBX command 0x%x failed with err:0x%x for VF %d\n",
+ opcode, mbx_err_code, func);
+ }
}
err_out:
@@ -1435,12 +1448,23 @@ free_cmd:
return rsp;
}
+
+static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
+{
+ if (cmd->type == QLC_83XX_MBX_CMD_NO_WAIT)
+ return qlcnic_sriov_async_issue_cmd(adapter, cmd);
+ else
+ return __qlcnic_sriov_issue_cmd(adapter, cmd);
+}
+
static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
{
struct qlcnic_cmd_args cmd;
struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
int ret;
+ memset(&cmd, 0, sizeof(cmd));
if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
return -ENOMEM;
@@ -1465,58 +1489,28 @@ out:
return ret;
}
-static void qlcnic_vf_add_mc_list(struct net_device *netdev)
+static void qlcnic_vf_add_mc_list(struct net_device *netdev, const u8 *mac)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
- struct qlcnic_mac_vlan_list *cur;
- struct list_head *head, tmp_list;
struct qlcnic_vf_info *vf;
u16 vlan_id;
int i;
- static const u8 bcast_addr[ETH_ALEN] = {
- 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
- };
-
vf = &adapter->ahw->sriov->vf_info[0];
- INIT_LIST_HEAD(&tmp_list);
- head = &adapter->vf_mc_list;
- netif_addr_lock_bh(netdev);
- while (!list_empty(head)) {
- cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
- list_move(&cur->list, &tmp_list);
- }
-
- netif_addr_unlock_bh(netdev);
-
- while (!list_empty(&tmp_list)) {
- cur = list_entry((&tmp_list)->next,
- struct qlcnic_mac_vlan_list, list);
- if (!qlcnic_sriov_check_any_vlan(vf)) {
- qlcnic_nic_add_mac(adapter, bcast_addr, 0);
- qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
- } else {
- mutex_lock(&vf->vlan_list_lock);
- for (i = 0; i < sriov->num_allowed_vlans; i++) {
- vlan_id = vf->sriov_vlans[i];
- if (vlan_id) {
- qlcnic_nic_add_mac(adapter, bcast_addr,
- vlan_id);
- qlcnic_nic_add_mac(adapter,
- cur->mac_addr,
- vlan_id);
- }
- }
- mutex_unlock(&vf->vlan_list_lock);
- if (qlcnic_84xx_check(adapter)) {
- qlcnic_nic_add_mac(adapter, bcast_addr, 0);
- qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
- }
+ if (!qlcnic_sriov_check_any_vlan(vf)) {
+ qlcnic_nic_add_mac(adapter, mac, 0);
+ } else {
+ spin_lock(&vf->vlan_list_lock);
+ for (i = 0; i < sriov->num_allowed_vlans; i++) {
+ vlan_id = vf->sriov_vlans[i];
+ if (vlan_id)
+ qlcnic_nic_add_mac(adapter, mac, vlan_id);
}
- list_del(&cur->list);
- kfree(cur);
+ spin_unlock(&vf->vlan_list_lock);
+ if (qlcnic_84xx_check(adapter))
+ qlcnic_nic_add_mac(adapter, mac, 0);
}
}
@@ -1525,6 +1519,7 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
struct list_head *head = &bc->async_list;
struct qlcnic_async_work_list *entry;
+ flush_workqueue(bc->bc_async_wq);
while (!list_empty(head)) {
entry = list_entry(head->next, struct qlcnic_async_work_list,
list);
@@ -1534,10 +1529,14 @@ void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
}
}
-static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
+void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_hardware_context *ahw = adapter->ahw;
+ static const u8 bcast_addr[ETH_ALEN] = {
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ };
+ struct netdev_hw_addr *ha;
u32 mode = VPORT_MISS_MODE_DROP;
if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
@@ -1549,23 +1548,49 @@ static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
} else if ((netdev->flags & IFF_ALLMULTI) ||
(netdev_mc_count(netdev) > ahw->max_mc_count)) {
mode = VPORT_MISS_MODE_ACCEPT_MULTI;
+ } else {
+ qlcnic_vf_add_mc_list(netdev, bcast_addr);
+ if (!netdev_mc_empty(netdev)) {
+ netdev_for_each_mc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr);
+ }
}
- if (qlcnic_sriov_vf_check(adapter))
- qlcnic_vf_add_mc_list(netdev);
+ /* Configure unicast MAC addresses; if there is not sufficient space
+ * to store all of them, enable promiscuous mode.
+ */
+ if (netdev_uc_count(netdev) > ahw->max_uc_count) {
+ mode = VPORT_MISS_MODE_ACCEPT_ALL;
+ } else if (!netdev_uc_empty(netdev)) {
+ netdev_for_each_uc_addr(ha, netdev)
+ qlcnic_vf_add_mc_list(netdev, ha->addr);
+ }
+
+ if (adapter->pdev->is_virtfn) {
+ if (mode == VPORT_MISS_MODE_ACCEPT_ALL &&
+ !adapter->fdb_mac_learn) {
+ qlcnic_alloc_lb_filters_mem(adapter);
+ adapter->drv_mac_learn = 1;
+ adapter->rx_mac_learn = true;
+ } else {
+ adapter->drv_mac_learn = 0;
+ adapter->rx_mac_learn = false;
+ }
+ }
qlcnic_nic_set_promisc(adapter, mode);
}
-static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
+static void qlcnic_sriov_handle_async_issue_cmd(struct work_struct *work)
{
struct qlcnic_async_work_list *entry;
- struct net_device *netdev;
+ struct qlcnic_adapter *adapter;
+ struct qlcnic_cmd_args *cmd;
entry = container_of(work, struct qlcnic_async_work_list, work);
- netdev = (struct net_device *)entry->ptr;
-
- qlcnic_sriov_vf_set_multi(netdev);
+ adapter = entry->ptr;
+ cmd = entry->cmd;
+ __qlcnic_sriov_issue_cmd(adapter, cmd);
return;
}
@@ -1595,8 +1620,9 @@ qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
return entry;
}
-static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
- work_func_t func, void *data)
+static void qlcnic_sriov_schedule_async_cmd(struct qlcnic_back_channel *bc,
+ work_func_t func, void *data,
+ struct qlcnic_cmd_args *cmd)
{
struct qlcnic_async_work_list *entry = NULL;
@@ -1605,21 +1631,23 @@ static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
return;
entry->ptr = data;
+ entry->cmd = cmd;
INIT_WORK(&entry->work, func);
queue_work(bc->bc_async_wq, &entry->work);
}
-void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
+static int qlcnic_sriov_async_issue_cmd(struct qlcnic_adapter *adapter,
+ struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
if (adapter->need_fw_reset)
- return;
+ return -EIO;
- qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
- netdev);
+ qlcnic_sriov_schedule_async_cmd(bc, qlcnic_sriov_handle_async_issue_cmd,
+ adapter, cmd);
+ return 0;
}
static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
@@ -1843,6 +1871,12 @@ static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
return 0;
}
+static void qlcnic_sriov_vf_periodic_tasks(struct qlcnic_adapter *adapter)
+{
+ if (adapter->fhash.fnum)
+ qlcnic_prune_lb_filters(adapter);
+}
+
static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
{
struct qlcnic_adapter *adapter;
@@ -1874,6 +1908,8 @@ static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
}
idc->prev_state = idc->curr_state;
+ qlcnic_sriov_vf_periodic_tasks(adapter);
+
if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
idc->delay);
@@ -1897,7 +1933,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
if (!vf->sriov_vlans)
return err;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
for (i = 0; i < sriov->num_allowed_vlans; i++) {
if (vf->sriov_vlans[i] == vlan_id) {
@@ -1906,7 +1942,7 @@ static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
}
}
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return err;
}
@@ -1915,12 +1951,12 @@ static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
{
int err = 0;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
if (vf->num_vlan >= sriov->num_allowed_vlans)
err = -EINVAL;
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return err;
}
@@ -1973,7 +2009,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
if (!vf->sriov_vlans)
return;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
switch (opcode) {
case QLC_VLAN_ADD:
@@ -1986,7 +2022,7 @@ static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
netdev_err(adapter->netdev, "Invalid VLAN operation\n");
}
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return;
}
@@ -1994,10 +2030,12 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
u16 vid, u8 enable)
{
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
+ struct net_device *netdev = adapter->netdev;
struct qlcnic_vf_info *vf;
struct qlcnic_cmd_args cmd;
int ret;
+ memset(&cmd, 0, sizeof(cmd));
if (vid == 0)
return 0;
@@ -2019,14 +2057,18 @@ int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
dev_err(&adapter->pdev->dev,
"Failed to configure guest VLAN, err=%d\n", ret);
} else {
+ netif_addr_lock_bh(netdev);
qlcnic_free_mac_list(adapter);
+ netif_addr_unlock_bh(netdev);
if (enable)
qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
else
qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
- qlcnic_set_multi(adapter->netdev);
+ netif_addr_lock_bh(netdev);
+ qlcnic_set_multi(netdev);
+ netif_addr_unlock_bh(netdev);
}
qlcnic_free_mbx_args(&cmd);
@@ -2157,11 +2199,11 @@ bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
{
bool err = false;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
if (vf->num_vlan)
err = true;
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
return err;
}
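Throughout this file the VLAN list lock changes from a mutex to a spinlock taken with the _bh variants, since the list is now also walked under netif_addr_lock_bh in the set_rx_mode path (see the hunks above), which cannot sleep. A generic sketch of the conversion, using an illustrative structure rather than the driver's qlcnic_vf_info:

#include <linux/spinlock.h>
#include <linux/types.h>

struct vlan_list {
	spinlock_t lock;	/* protects ids[] and count; was a mutex */
	u16 ids[8];
	int count;
};

static bool vlan_list_contains(struct vlan_list *vl, u16 vid)
{
	bool found = false;
	int i;

	/* _bh variant: safe against softirq users of the same list */
	spin_lock_bh(&vl->lock);
	for (i = 0; i < vl->count; i++) {
		if (vl->ids[i] == vid) {
			found = true;
			break;
		}
	}
	spin_unlock_bh(&vl->lock);

	return found;
}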
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 2801379..a29538b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -16,6 +16,7 @@
#define QLC_VF_FLOOD_BIT BIT_16
#define QLC_FLOOD_MODE 0x5
#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
+#define QLC_INTR_COAL_TYPE_MASK 0x7
static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
@@ -83,7 +84,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
info->max_tx_ques = res->num_tx_queues / max;
if (qlcnic_83xx_pf_check(adapter))
- num_macs = 1;
+ num_macs = QLCNIC_83XX_SRIOV_VF_MAX_MAC;
info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
@@ -337,9 +338,12 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
cmd.req.arg[1] = 0x4;
if (enable) {
+ adapter->flags |= QLCNIC_VLAN_FILTERING;
cmd.req.arg[1] |= BIT_16;
if (qlcnic_84xx_check(adapter))
cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+ } else {
+ adapter->flags &= ~QLCNIC_VLAN_FILTERING;
}
err = qlcnic_issue_cmd(adapter, &cmd);
@@ -471,12 +475,12 @@ static int qlcnic_pci_sriov_disable(struct qlcnic_adapter *adapter)
return -EPERM;
}
+ qlcnic_sriov_pf_disable(adapter);
+
rtnl_lock();
if (netif_running(netdev))
__qlcnic_down(adapter, netdev);
- qlcnic_sriov_pf_disable(adapter);
-
qlcnic_sriov_free_vlans(adapter);
qlcnic_sriov_pf_cleanup(adapter);
@@ -595,7 +599,6 @@ static int __qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter,
qlcnic_sriov_alloc_vlans(adapter);
- err = qlcnic_sriov_pf_enable(adapter, num_vfs);
return err;
del_flr_queue:
@@ -626,25 +629,36 @@ static int qlcnic_pci_sriov_enable(struct qlcnic_adapter *adapter, int num_vfs)
__qlcnic_down(adapter, netdev);
err = __qlcnic_pci_sriov_enable(adapter, num_vfs);
- if (err) {
- netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
- adapter->portnum);
+ if (err)
+ goto error;
- err = -EIO;
- if (qlcnic_83xx_configure_opmode(adapter))
- goto error;
- } else {
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+
+ rtnl_unlock();
+ err = qlcnic_sriov_pf_enable(adapter, num_vfs);
+ if (!err) {
netdev_info(netdev,
"SR-IOV is enabled successfully on port %d\n",
adapter->portnum);
/* Return number of vfs enabled */
- err = num_vfs;
+ return num_vfs;
}
+
+ rtnl_lock();
if (netif_running(netdev))
- __qlcnic_up(adapter, netdev);
+ __qlcnic_down(adapter, netdev);
error:
+ if (!qlcnic_83xx_configure_opmode(adapter)) {
+ if (netif_running(netdev))
+ __qlcnic_up(adapter, netdev);
+ }
+
rtnl_unlock();
+ netdev_info(netdev, "Failed to enable SR-IOV on port %d\n",
+ adapter->portnum);
+
return err;
}
@@ -773,7 +787,7 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
u16 vlan, u8 op)
{
- struct qlcnic_cmd_args cmd;
+ struct qlcnic_cmd_args *cmd;
struct qlcnic_macvlan_mbx mv;
struct qlcnic_vport *vp;
u8 *addr;
@@ -783,21 +797,27 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
vp = vf->vp;
- if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN))
+ cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);
+ if (!cmd)
return -ENOMEM;
+ err = qlcnic_alloc_mbx_args(cmd, adapter, QLCNIC_CMD_CONFIG_MAC_VLAN);
+ if (err)
+ goto free_cmd;
+
+ cmd->type = QLC_83XX_MBX_CMD_NO_WAIT;
vpid = qlcnic_sriov_pf_get_vport_handle(adapter, vf->pci_func);
if (vpid < 0) {
err = -EINVAL;
- goto out;
+ goto free_args;
}
if (vlan)
op = ((op == QLCNIC_MAC_ADD || op == QLCNIC_MAC_VLAN_ADD) ?
QLCNIC_MAC_VLAN_ADD : QLCNIC_MAC_VLAN_DEL);
- cmd.req.arg[1] = op | (1 << 8) | (3 << 6);
- cmd.req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
+ cmd->req.arg[1] = op | (1 << 8) | (3 << 6);
+ cmd->req.arg[1] |= ((vpid & 0xffff) << 16) | BIT_31;
addr = vp->mac;
mv.vlan = vlan;
@@ -807,18 +827,18 @@ static int qlcnic_sriov_cfg_vf_def_mac(struct qlcnic_adapter *adapter,
mv.mac_addr3 = addr[3];
mv.mac_addr4 = addr[4];
mv.mac_addr5 = addr[5];
- buf = &cmd.req.arg[2];
+ buf = &cmd->req.arg[2];
memcpy(buf, &mv, sizeof(struct qlcnic_macvlan_mbx));
- err = qlcnic_issue_cmd(adapter, &cmd);
+ err = qlcnic_issue_cmd(adapter, cmd);
- if (err)
- dev_err(&adapter->pdev->dev,
- "MAC-VLAN %s to CAM failed, err=%d.\n",
- ((op == 1) ? "add " : "delete "), err);
+ if (!err)
+ return err;
-out:
- qlcnic_free_mbx_args(&cmd);
+free_args:
+ qlcnic_free_mbx_args(cmd);
+free_cmd:
+ kfree(cmd);
return err;
}
@@ -840,7 +860,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
sriov = adapter->ahw->sriov;
- mutex_lock(&vf->vlan_list_lock);
+ spin_lock_bh(&vf->vlan_list_lock);
if (vf->num_vlan) {
for (i = 0; i < sriov->num_allowed_vlans; i++) {
vlan = vf->sriov_vlans[i];
@@ -849,7 +869,7 @@ static void qlcnic_83xx_cfg_default_mac_vlan(struct qlcnic_adapter *adapter,
opcode);
}
}
- mutex_unlock(&vf->vlan_list_lock);
+ spin_unlock_bh(&vf->vlan_list_lock);
if (vf->vp->vlan_mode != QLC_PVID_MODE) {
if (qlcnic_83xx_pf_check(adapter) &&
@@ -1178,19 +1198,41 @@ static int qlcnic_sriov_validate_cfg_intrcoal(struct qlcnic_adapter *adapter,
{
struct qlcnic_nic_intr_coalesce *coal = &adapter->ahw->coal;
u16 ctx_id, pkts, time;
+ int err = -EINVAL;
+ u8 type;
+ type = cmd->req.arg[1] & QLC_INTR_COAL_TYPE_MASK;
ctx_id = cmd->req.arg[1] >> 16;
pkts = cmd->req.arg[2] & 0xffff;
time = cmd->req.arg[2] >> 16;
- if (ctx_id != vf->rx_ctx_id)
- return -EINVAL;
- if (pkts > coal->rx_packets)
- return -EINVAL;
- if (time < coal->rx_time_us)
- return -EINVAL;
+ switch (type) {
+ case QLCNIC_INTR_COAL_TYPE_RX:
+ if (ctx_id != vf->rx_ctx_id || pkts > coal->rx_packets ||
+ time < coal->rx_time_us)
+ goto err_label;
+ break;
+ case QLCNIC_INTR_COAL_TYPE_TX:
+ if (ctx_id != vf->tx_ctx_id || pkts > coal->tx_packets ||
+ time < coal->tx_time_us)
+ goto err_label;
+ break;
+ default:
+ netdev_err(adapter->netdev, "Invalid coalescing type 0x%x received\n",
+ type);
+ return err;
+ }
return 0;
+
+err_label:
+ netdev_err(adapter->netdev, "Expected: rx_ctx_id 0x%x rx_packets 0x%x rx_time_us 0x%x tx_ctx_id 0x%x tx_packets 0x%x tx_time_us 0x%x\n",
+ vf->rx_ctx_id, coal->rx_packets, coal->rx_time_us,
+ vf->tx_ctx_id, coal->tx_packets, coal->tx_time_us);
+ netdev_err(adapter->netdev, "Received: ctx_id 0x%x packets 0x%x time_us 0x%x type 0x%x\n",
+ ctx_id, pkts, time, type);
+
+ return err;
}
static int qlcnic_sriov_pf_cfg_intrcoal_cmd(struct qlcnic_bc_trans *tran,
@@ -1214,7 +1256,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
struct qlcnic_vf_info *vf,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_macvlan_mbx *macvlan;
struct qlcnic_vport *vp = vf->vp;
u8 op, new_op;
@@ -1224,14 +1265,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
cmd->req.arg[1] |= (vf->vp->handle << 16);
cmd->req.arg[1] |= BIT_31;
- macvlan = (struct qlcnic_macvlan_mbx *)&cmd->req.arg[2];
- if (!(macvlan->mac_addr0 & BIT_0)) {
- dev_err(&adapter->pdev->dev,
- "MAC address change is not allowed from VF %d",
- vf->pci_func);
- return -EINVAL;
- }
-
if (vp->vlan_mode == QLC_PVID_MODE) {
op = cmd->req.arg[1] & 0x7;
cmd->req.arg[1] &= ~0x7;
@@ -1815,7 +1848,8 @@ int qlcnic_sriov_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
return 0;
}
-int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
+int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf,
+ int min_tx_rate, int max_tx_rate)
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_sriov *sriov = adapter->ahw->sriov;
@@ -1830,35 +1864,52 @@ int qlcnic_sriov_set_vf_tx_rate(struct net_device *netdev, int vf, int tx_rate)
if (vf >= sriov->num_vfs)
return -EINVAL;
- if (tx_rate >= 10000 || tx_rate < 100) {
+ vf_info = &sriov->vf_info[vf];
+ vp = vf_info->vp;
+ vpid = vp->handle;
+
+ if (!min_tx_rate)
+ min_tx_rate = QLC_VF_MIN_TX_RATE;
+
+ if (max_tx_rate &&
+ (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate)) {
netdev_err(netdev,
- "Invalid Tx rate, allowed range is [%d - %d]",
- QLC_VF_MIN_TX_RATE, QLC_VF_MAX_TX_RATE);
+ "Invalid max Tx rate, allowed range is [%d - %d]",
+ min_tx_rate, QLC_VF_MAX_TX_RATE);
return -EINVAL;
}
- if (tx_rate == 0)
- tx_rate = 10000;
+ if (!max_tx_rate)
+ max_tx_rate = 10000;
- vf_info = &sriov->vf_info[vf];
- vp = vf_info->vp;
- vpid = vp->handle;
+ if (min_tx_rate &&
+ (min_tx_rate > max_tx_rate || min_tx_rate < QLC_VF_MIN_TX_RATE)) {
+ netdev_err(netdev,
+ "Invalid min Tx rate, allowed range is [%d - %d]",
+ QLC_VF_MIN_TX_RATE, max_tx_rate);
+ return -EINVAL;
+ }
if (test_bit(QLC_BC_VF_STATE, &vf_info->state)) {
if (qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, vpid))
return -EIO;
- nic_info.max_tx_bw = tx_rate / 100;
+ nic_info.max_tx_bw = max_tx_rate / 100;
+ nic_info.min_tx_bw = min_tx_rate / 100;
nic_info.bit_offsets = BIT_0;
if (qlcnic_sriov_pf_set_vport_info(adapter, &nic_info, vpid))
return -EIO;
}
- vp->max_tx_bw = tx_rate / 100;
+ vp->max_tx_bw = max_tx_rate / 100;
netdev_info(netdev,
- "Setting Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
- tx_rate, vp->max_tx_bw, vf);
+ "Setting Max Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+ max_tx_rate, vp->max_tx_bw, vf);
+ vp->min_tx_bw = min_tx_rate / 100;
+ netdev_info(netdev,
+ "Setting Min Tx rate %d (Mbps), %d %% of PF bandwidth, for VF %d\n",
+ min_tx_rate, vp->min_tx_bw, vf);
return 0;
}
@@ -1957,9 +2008,13 @@ int qlcnic_sriov_get_vf_config(struct net_device *netdev,
ivi->qos = vp->qos;
ivi->spoofchk = vp->spoofchk;
if (vp->max_tx_bw == MAX_BW)
- ivi->tx_rate = 0;
+ ivi->max_tx_rate = 0;
+ else
+ ivi->max_tx_rate = vp->max_tx_bw * 100;
+ if (vp->min_tx_bw == MIN_BW)
+ ivi->min_tx_rate = 0;
else
- ivi->tx_rate = vp->max_tx_bw * 100;
+ ivi->min_tx_rate = vp->min_tx_bw * 100;
ivi->vf = vf;
return 0;
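qlcnic_sriov_set_vf_tx_rate now takes both a minimum and a maximum rate, tracking the ndo_set_vf_rate signature the core gained this cycle, where 0 keeps the old semantics ("no limit" for max, "no guarantee" for min). A sketch of the validation order used above, with illustrative bounds rather than the driver's exact limits:

#include <linux/errno.h>
#include <linux/netdevice.h>

/* Hypothetical callback wired to .ndo_set_vf_rate */
static int demo_set_vf_rate(struct net_device *netdev, int vf,
			    int min_tx_rate, int max_tx_rate)
{
	/* 0 means "unlimited" for max and "no guarantee" for min */
	if (max_tx_rate &&
	    (max_tx_rate >= 10000 || max_tx_rate < min_tx_rate))
		return -EINVAL;

	if (!max_tx_rate)
		max_tx_rate = 10000;	/* cap at line rate */

	if (min_tx_rate &&
	    (min_tx_rate > max_tx_rate || min_tx_rate < 100))
		return -EINVAL;

	/* ...program the vport, e.g. in units of 100 Mbps, here... */
	return 0;
}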
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index cd346e2..f5786d5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -19,6 +19,10 @@
#include <linux/sysfs.h>
#include <linux/aer.h>
#include <linux/log2.h>
+#ifdef CONFIG_QLCNIC_HWMON
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#endif
#define QLC_STATUS_UNSUPPORTED_CMD -2
@@ -358,6 +362,8 @@ int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
if (adapter->npars[i].pci_func == pci_func)
return i;
}
+
+ dev_err(&adapter->pdev->dev, "%s: Invalid nic function\n", __func__);
return -EINVAL;
}
@@ -1243,6 +1249,68 @@ static struct bin_attribute bin_attr_flash = {
.write = qlcnic_83xx_sysfs_flash_write_handler,
};
+#ifdef CONFIG_QLCNIC_HWMON
+
+static ssize_t qlcnic_hwmon_show_temp(struct device *dev,
+ struct device_attribute *dev_attr,
+ char *buf)
+{
+ struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
+ unsigned int temperature = 0, value = 0;
+
+ if (qlcnic_83xx_check(adapter))
+ value = QLCRDX(adapter->ahw, QLC_83XX_ASIC_TEMP);
+ else if (qlcnic_82xx_check(adapter))
+ value = QLC_SHARED_REG_RD32(adapter, QLCNIC_ASIC_TEMP);
+
+ temperature = qlcnic_get_temp_val(value);
+ /* display millidegree Celsius */
+ temperature *= 1000;
+ return sprintf(buf, "%u\n", temperature);
+}
+
+/* hwmon-sysfs attributes */
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO,
+ qlcnic_hwmon_show_temp, NULL, 1);
+
+static struct attribute *qlcnic_hwmon_attrs[] = {
+ &sensor_dev_attr_temp1_input.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(qlcnic_hwmon);
+
+void qlcnic_register_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *dev = &adapter->pdev->dev;
+ struct device *hwmon_dev;
+
+ /* Skip hwmon registration for a VF device */
+ if (qlcnic_sriov_vf_check(adapter)) {
+ adapter->ahw->hwmon_dev = NULL;
+ return;
+ }
+ hwmon_dev = hwmon_device_register_with_groups(dev, qlcnic_driver_name,
+ adapter,
+ qlcnic_hwmon_groups);
+ if (IS_ERR(hwmon_dev)) {
+ dev_err(dev, "Cannot register with hwmon, err=%ld\n",
+ PTR_ERR(hwmon_dev));
+ hwmon_dev = NULL;
+ }
+ adapter->ahw->hwmon_dev = hwmon_dev;
+}
+
+void qlcnic_unregister_hwmon_dev(struct qlcnic_adapter *adapter)
+{
+ struct device *hwmon_dev = adapter->ahw->hwmon_dev;
+ if (hwmon_dev) {
+ hwmon_device_unregister(hwmon_dev);
+ adapter->ahw->hwmon_dev = NULL;
+ }
+}
+#endif
+
void qlcnic_create_sysfs_entries(struct qlcnic_adapter *adapter)
{
struct device *dev = &adapter->pdev->dev;
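The hwmon block above exposes the adapter's ASIC temperature as temp1_input. hwmon_device_register_with_groups() ties the attribute group to a parent device and a drvdata pointer, so the show routine can recover the adapter with dev_get_drvdata(). A stripped-down sketch of the same registration pattern, all names illustrative:

#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/kernel.h>

static ssize_t demo_show_temp(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	/* hwmon reports temperatures in millidegrees Celsius */
	return sprintf(buf, "%d\n", 42 * 1000);
}

static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, demo_show_temp, NULL, 1);

static struct attribute *demo_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	NULL
};
ATTRIBUTE_GROUPS(demo);

static struct device *demo_hwmon_register(struct device *parent, void *drv)
{
	/* returns ERR_PTR() on failure, so check with IS_ERR() */
	return hwmon_device_register_with_groups(parent, "demo", drv,
						 demo_groups);
}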
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 0a1d76ac..b40050e 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3595,7 +3595,7 @@ static int ql_request_irq(struct ql_adapter *qdev)
}
return status;
err_irq:
- netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!/n");
+ netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
ql_free_irq(qdev);
return status;
}
@@ -4770,7 +4770,7 @@ static int qlge_probe(struct pci_dev *pdev,
ndev->irq = pdev->irq;
ndev->netdev_ops = &qlge_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
+ ndev->ethtool_ops = &qlge_ethtool_ops;
ndev->watchdog_timeo = 10 * HZ;
err = register_netdev(ndev);
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index aa1c079..be425ad 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7125,7 +7125,7 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
for (i = 0; i < ETH_ALEN; i++)
dev->dev_addr[i] = RTL_R8(MAC0 + i);
- SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
+ dev->ethtool_ops = &rtl8169_ethtool_ops;
dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 6a9509c..7622213 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -307,6 +307,27 @@ static const u16 sh_eth_offset_fast_sh4[SH_ETH_MAX_REGISTER_OFFSET] = {
};
static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
+ [EDMR] = 0x0000,
+ [EDTRR] = 0x0004,
+ [EDRRR] = 0x0008,
+ [TDLAR] = 0x000c,
+ [RDLAR] = 0x0010,
+ [EESR] = 0x0014,
+ [EESIPR] = 0x0018,
+ [TRSCER] = 0x001c,
+ [RMFCR] = 0x0020,
+ [TFTR] = 0x0024,
+ [FDR] = 0x0028,
+ [RMCR] = 0x002c,
+ [EDOCR] = 0x0030,
+ [FCFTR] = 0x0034,
+ [RPADIR] = 0x0038,
+ [TRIMD] = 0x003c,
+ [RBWAR] = 0x0040,
+ [RDFAR] = 0x0044,
+ [TBRAR] = 0x004c,
+ [TDFAR] = 0x0050,
+
[ECMR] = 0x0160,
[ECSR] = 0x0164,
[ECSIPR] = 0x0168,
@@ -546,7 +567,6 @@ static struct sh_eth_cpu_data sh7757_data = {
.register_type = SH_ETH_REG_FAST_SH4,
.eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
- .rmcr_value = RMCR_RNC,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -624,7 +644,6 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000072f,
- .rmcr_value = RMCR_RNC,
.irq_flags = IRQF_SHARED,
.apr = 1,
@@ -752,7 +771,6 @@ static struct sh_eth_cpu_data r8a7740_data = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000070f,
- .rmcr_value = RMCR_RNC,
.apr = 1,
.mpr = 1,
@@ -784,7 +802,6 @@ static struct sh_eth_cpu_data r7s72100_data = {
EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE |
EESR_TDE | EESR_ECI,
.fdr_value = 0x0000070f,
- .rmcr_value = RMCR_RNC,
.no_psr = 1,
.apr = 1,
@@ -833,9 +850,6 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
if (!cd->fdr_value)
cd->fdr_value = DEFAULT_FDR_INIT;
- if (!cd->rmcr_value)
- cd->rmcr_value = DEFAULT_RMCR_VALUE;
-
if (!cd->tx_check)
cd->tx_check = DEFAULT_TX_CHECK;
@@ -1287,8 +1301,8 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
sh_eth_write(ndev, 0, TFTR);
- /* Frame recv control */
- sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);
+ /* Frame recv control (enable multiple-packets per rx irq) */
+ sh_eth_write(ndev, RMCR_RNC, RMCR);
sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
@@ -1385,7 +1399,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
int entry = mdp->cur_rx % mdp->num_rx_ring;
int boguscnt = (mdp->dirty_rx + mdp->num_rx_ring) - mdp->cur_rx;
struct sk_buff *skb;
- int exceeded = 0;
u16 pkt_len = 0;
u32 desc_status;
@@ -1397,10 +1410,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (--boguscnt < 0)
break;
- if (*quota <= 0) {
- exceeded = 1;
+ if (*quota <= 0)
break;
- }
+
(*quota)--;
if (!(desc_status & RDFEND))
@@ -1448,7 +1460,6 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += pkt_len;
}
- rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
entry = (++mdp->cur_rx) % mdp->num_rx_ring;
rxdesc = &mdp->rx_ring[entry];
}
@@ -1494,7 +1505,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
sh_eth_write(ndev, EDRRR_R, EDRRR);
}
- return exceeded;
+ return *quota <= 0;
}
static void sh_eth_rcv_snd_disable(struct net_device *ndev)
@@ -2627,8 +2638,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp,
pdev->name, pdev->id);
/* PHY IRQ */
- mdp->mii_bus->irq = devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR,
- GFP_KERNEL);
+ mdp->mii_bus->irq = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
+ GFP_KERNEL);
if (!mdp->mii_bus->irq) {
ret = -ENOMEM;
goto out_free_bus;
@@ -2843,7 +2854,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
ndev->netdev_ops = &sh_eth_netdev_ops_tsu;
else
ndev->netdev_ops = &sh_eth_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &sh_eth_ethtool_ops);
+ ndev->ethtool_ops = &sh_eth_ethtool_ops;
ndev->watchdog_timeo = TX_TIMEOUT;
/* debug message level */
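The MDIO hunk above replaces devm_kzalloc(dev, sizeof(int) * PHY_MAX_ADDR, ...) with devm_kmalloc_array(), which fails cleanly if the element count times the element size would overflow. Note that kmalloc_array does not zero the buffer (devm_kcalloc would), which is fine when the caller immediately fills every slot, as the sh_eth MDIO code does. A sketch, assuming a generic probe context:

#include <linux/device.h>
#include <linux/phy.h>

static int demo_alloc_phy_irqs(struct device *dev, int **irqs)
{
	/* overflow-checked equivalent of kmalloc(n * size) */
	*irqs = devm_kmalloc_array(dev, PHY_MAX_ADDR, sizeof(int),
				   GFP_KERNEL);
	if (!*irqs)
		return -ENOMEM;
	return 0;
}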
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index d55e37c..b37c427 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -319,7 +319,6 @@ enum TD_STS_BIT {
enum RMCR_BIT {
RMCR_RNC = 0x00000001,
};
-#define DEFAULT_RMCR_VALUE 0x00000000
/* ECMR */
enum FELIC_MODE_BIT {
@@ -466,7 +465,6 @@ struct sh_eth_cpu_data {
unsigned long fdr_value;
unsigned long fcftr_value;
unsigned long rpadir_value;
- unsigned long rmcr_value;
/* interrupt checking mask */
unsigned long tx_check;
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
index 0415fa5..c0981ae 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c
@@ -520,5 +520,5 @@ static const struct ethtool_ops sxgbe_ethtool_ops = {
void sxgbe_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &sxgbe_ethtool_ops);
+ netdev->ethtool_ops = &sxgbe_ethtool_ops;
}
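Many hunks in this pull replace SET_ETHTOOL_OPS() with a direct assignment; the macro was a trivial assignment wrapper and is being removed tree-wide this cycle. The pattern, for any driver:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const struct ethtool_ops demo_ethtool_ops = {
	.get_link = ethtool_op_get_link,
};

static void demo_setup_ethtool(struct net_device *ndev)
{
	/* was: SET_ETHTOOL_OPS(ndev, &demo_ethtool_ops); */
	ndev->ethtool_ops = &demo_ethtool_ops;
}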
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 82a9a98..6984944 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -425,8 +425,8 @@ dmamem_err:
* @rx_rsize: ring size
* Description: this function frees the DMA RX descriptors
*/
-void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
- int rx_rsize)
+static void free_rx_ring(struct device *dev, struct sxgbe_rx_queue *rx_ring,
+ int rx_rsize)
{
dma_free_coherent(dev, rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
rx_ring->dma_rx, rx_ring->dma_rx_phy);
@@ -519,8 +519,8 @@ error:
* @tx_rsize: ring size
* Description: this function frees the DMA TX descriptors
*/
-void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
- int tx_rsize)
+static void free_tx_ring(struct device *dev, struct sxgbe_tx_queue *tx_ring,
+ int tx_rsize)
{
dma_free_coherent(dev, tx_rsize * sizeof(struct sxgbe_tx_norm_desc),
tx_ring->dma_tx, tx_ring->dma_tx_phy);
@@ -1221,11 +1221,10 @@ static int sxgbe_release(struct net_device *dev)
return 0;
}
-
/* Prepare first Tx descriptor for doing TSO operation */
-void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
- struct sxgbe_tx_norm_desc *first_desc,
- struct sk_buff *skb)
+static void sxgbe_tso_prepare(struct sxgbe_priv_data *priv,
+ struct sxgbe_tx_norm_desc *first_desc,
+ struct sk_buff *skb)
{
unsigned int total_hdr_len, tcp_hdr_len;
@@ -1914,40 +1913,6 @@ static void sxgbe_set_rx_mode(struct net_device *dev)
readl(ioaddr + SXGBE_HASH_LOW));
}
-/**
- * sxgbe_config - entry point for changing configuration mode passed on by
- * ifconfig
- * @dev : pointer to the device structure
- * @map : pointer to the device mapping structure
- * Description:
- * This function is a driver entry point which gets called by the kernel
- * whenever some device configuration is changed.
- * Return value:
- * This function returns 0 if success and appropriate error otherwise.
- */
-static int sxgbe_config(struct net_device *dev, struct ifmap *map)
-{
- struct sxgbe_priv_data *priv = netdev_priv(dev);
-
- /* Can't act on a running interface */
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Don't allow changing the I/O address */
- if (map->base_addr != (unsigned long)priv->ioaddr) {
- netdev_warn(dev, "can't change I/O address\n");
- return -EOPNOTSUPP;
- }
-
- /* Don't allow changing the IRQ */
- if (map->irq != priv->irq) {
- netdev_warn(dev, "not change IRQ number %d\n", priv->irq);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
* sxgbe_poll_controller - entry point for polling receive by device
@@ -2009,7 +1974,6 @@ static const struct net_device_ops sxgbe_netdev_ops = {
.ndo_set_rx_mode = sxgbe_set_rx_mode,
.ndo_tx_timeout = sxgbe_tx_timeout,
.ndo_do_ioctl = sxgbe_ioctl,
- .ndo_set_config = sxgbe_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = sxgbe_poll_controller,
#endif
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
index 56f8bf5..81437d9 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_reg.h
@@ -188,7 +188,6 @@
/* L3/L4 function registers */
#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
-#define SXGBE_CORE_L34_ADDCTL_REG 0x0C00
#define SXGBE_CORE_L34_DATA_REG 0x0C04
/* ARP registers */
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 63d595f..1e27404 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -2248,7 +2248,7 @@ static int efx_register_netdev(struct efx_nic *efx)
} else {
net_dev->netdev_ops = &efx_farch_netdev_ops;
}
- SET_ETHTOOL_OPS(net_dev, &efx_ethtool_ops);
+ net_dev->ethtool_ops = &efx_ethtool_ops;
net_dev->gso_max_segs = EFX_TSO_MAX_SEGS;
rtnl_lock();
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 0de8b07..74739c4 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1033,7 +1033,7 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
0 : ARRAY_SIZE(efx->rx_indir_table));
}
-static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
+static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -1041,8 +1041,8 @@ static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, u32 *indir)
return 0;
}
-static int efx_ethtool_set_rxfh_indir(struct net_device *net_dev,
- const u32 *indir)
+static int efx_ethtool_set_rxfh(struct net_device *net_dev,
+ const u32 *indir, const u8 *key)
{
struct efx_nic *efx = netdev_priv(net_dev);
@@ -1125,8 +1125,8 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_rxnfc = efx_ethtool_get_rxnfc,
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
- .get_rxfh_indir = efx_ethtool_get_rxfh_indir,
- .set_rxfh_indir = efx_ethtool_set_rxfh_indir,
+ .get_rxfh = efx_ethtool_get_rxfh,
+ .set_rxfh = efx_ethtool_set_rxfh,
.get_ts_info = efx_ethtool_get_ts_info,
.get_module_info = efx_ethtool_get_module_info,
.get_module_eeprom = efx_ethtool_get_module_eeprom,
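The sfc hunk above renames the get_rxfh_indir/set_rxfh_indir hooks to the combined get_rxfh/set_rxfh pair, whose extra argument carries the RSS hash key alongside the indirection table; sfc leaves the key untouched since only the table is configurable here. A sketch of the era's callback shape, with an illustrative static table:

#include <linux/netdevice.h>
#include <linux/string.h>

static u32 demo_indir_table[128];

static int demo_get_rxfh(struct net_device *dev, u32 *indir, u8 *key)
{
	/* either pointer may be NULL when userspace asks for one part */
	if (indir)
		memcpy(indir, demo_indir_table, sizeof(demo_indir_table));
	return 0;
}

static int demo_set_rxfh(struct net_device *dev, const u32 *indir,
			 const u8 *key)
{
	if (indir)
		memcpy(demo_indir_table, indir, sizeof(demo_indir_table));
	/* no programmable hash key on this hardware: ignore "key" */
	return 0;
}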
diff --git a/drivers/net/ethernet/sfc/io.h b/drivers/net/ethernet/sfc/io.h
index 4d3f119..afb94aa 100644
--- a/drivers/net/ethernet/sfc/io.h
+++ b/drivers/net/ethernet/sfc/io.h
@@ -66,10 +66,17 @@
#define EFX_USE_QWORD_IO 1
#endif
+/* Hardware issue requires that only 64-bit naturally aligned writes
+ * are seen by hardware. It's not strictly necessary to restrict to
+ * x86_64 arch, but done for safety since unusual write combining behaviour
+ * can break PIO.
+ */
+#ifdef CONFIG_X86_64
/* PIO is a win only if write-combining is possible */
#ifdef ARCH_HAS_IOREMAP_WC
#define EFX_USE_PIO 1
#endif
+#endif
#ifdef EFX_USE_QWORD_IO
static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
diff --git a/drivers/net/ethernet/sfc/siena_sriov.c b/drivers/net/ethernet/sfc/siena_sriov.c
index 9a9205e..43d2e64 100644
--- a/drivers/net/ethernet/sfc/siena_sriov.c
+++ b/drivers/net/ethernet/sfc/siena_sriov.c
@@ -1633,7 +1633,8 @@ int efx_sriov_get_vf_config(struct net_device *net_dev, int vf_i,
ivi->vf = vf_i;
ether_addr_copy(ivi->mac, vf->addr.mac_addr);
- ivi->tx_rate = 0;
+ ivi->max_tx_rate = 0;
+ ivi->min_tx_rate = 0;
tci = ntohs(vf->addr.tci);
ivi->vlan = tci & VLAN_VID_MASK;
ivi->qos = (tci >> VLAN_PRIO_SHIFT) & 0x7;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index fa94753..ede8dcc 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -189,6 +189,18 @@ struct efx_short_copy_buffer {
u8 buf[L1_CACHE_BYTES];
};
+/* Copy in explicit 64-bit writes. */
+static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
+{
+ u64 *src64 = src;
+ u64 __iomem *dest64 = dest;
+ size_t l64 = len / 8;
+ size_t i;
+
+ for (i = 0; i < l64; i++)
+ writeq(src64[i], &dest64[i]);
+}
+
/* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
* Advances piobuf pointer. Leaves additional data in the copy buffer.
*/
@@ -198,7 +210,7 @@ static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
{
int block_len = len & ~(sizeof(copy_buf->buf) - 1);
- memcpy_toio(*piobuf, data, block_len);
+ efx_memcpy_64(*piobuf, data, block_len);
*piobuf += block_len;
len -= block_len;
@@ -230,7 +242,7 @@ static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
if (copy_buf->used < sizeof(copy_buf->buf))
return;
- memcpy_toio(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+ efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
*piobuf += sizeof(copy_buf->buf);
data += copy_to_buf;
len -= copy_to_buf;
@@ -245,7 +257,7 @@ static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
{
/* if there's anything in it, write the whole buffer, including junk */
if (copy_buf->used)
- memcpy_toio(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+ efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
}
/* Traverse skb structure and copy fragments in to PIO buffer.
@@ -304,8 +316,8 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
*/
BUILD_BUG_ON(L1_CACHE_BYTES >
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
- memcpy_toio(tx_queue->piobuf, skb->data,
- ALIGN(skb->len, L1_CACHE_BYTES));
+ efx_memcpy_64(tx_queue->piobuf, skb->data,
+ ALIGN(skb->len, L1_CACHE_BYTES));
}
EFX_POPULATE_QWORD_5(buffer->option,
diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c
index acbbe48..a863399 100644
--- a/drivers/net/ethernet/sis/sis190.c
+++ b/drivers/net/ethernet/sis/sis190.c
@@ -1877,7 +1877,7 @@ static int sis190_init_one(struct pci_dev *pdev,
dev->netdev_ops = &sis190_netdev_ops;
- SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops);
+ dev->ethtool_ops = &sis190_ethtool_ops;
dev->watchdog_timeo = SIS190_TX_TIMEOUT;
spin_lock_init(&tp->lock);
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index c7a4868..6b33127 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -318,7 +318,7 @@ static int smc91c92_probe(struct pcmcia_device *link)
/* The SMC91c92-specific entries in the device structure. */
dev->netdev_ops = &smc_netdev_ops;
- SET_ETHTOOL_OPS(dev, &ethtool_ops);
+ dev->ethtool_ops = &ethtool_ops;
dev->watchdog_timeo = TX_TIMEOUT;
smc->mii_if.dev = dev;
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index a0fc151..5e13fa5 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2477,6 +2477,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
goto out_disable_resources;
}
+ netif_carrier_off(dev);
+
retval = register_netdev(dev);
if (retval) {
SMSC_WARN(pdata, probe, "Error %i registering device", retval);
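smsc911x now calls netif_carrier_off() before register_netdev(). Registering first leaves a window in which the interface claims carrier before the PHY has reported link, which can confuse userspace bring-up scripts. The idiom, reduced to its core:

#include <linux/netdevice.h>

static int demo_register(struct net_device *ndev)
{
	/* start with carrier down; the PHY state machine raises it later */
	netif_carrier_off(ndev);
	return register_netdev(ndev);
}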
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index c5f9cb8..c62e67f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -322,9 +322,7 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
return -EBUSY;
}
cmd->transceiver = XCVR_INTERNAL;
- spin_lock_irq(&priv->lock);
rc = phy_ethtool_gset(phy, cmd);
- spin_unlock_irq(&priv->lock);
return rc;
}
@@ -431,8 +429,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
if (priv->pcs) /* FIXME */
return;
- spin_lock(&priv->lock);
-
pause->rx_pause = 0;
pause->tx_pause = 0;
pause->autoneg = priv->phydev->autoneg;
@@ -442,7 +438,6 @@ stmmac_get_pauseparam(struct net_device *netdev,
if (priv->flow_ctrl & FLOW_TX)
pause->tx_pause = 1;
- spin_unlock(&priv->lock);
}
static int
@@ -457,8 +452,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
if (priv->pcs) /* FIXME */
return -EOPNOTSUPP;
- spin_lock(&priv->lock);
-
if (pause->rx_pause)
new_pause |= FLOW_RX;
if (pause->tx_pause)
@@ -473,7 +466,6 @@ stmmac_set_pauseparam(struct net_device *netdev,
} else
priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
priv->flow_ctrl, priv->pause);
- spin_unlock(&priv->lock);
return ret;
}
@@ -784,5 +776,5 @@ static const struct ethtool_ops stmmac_ethtool_ops = {
void stmmac_set_ethtool_ops(struct net_device *netdev)
{
- SET_ETHTOOL_OPS(netdev, &stmmac_ethtool_ops);
+ netdev->ethtool_ops = &stmmac_ethtool_ops;
}
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 0f4841d..057a120 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1753,7 +1753,7 @@ static int stmmac_open(struct net_device *dev)
}
/* Request the IRQ lines */
- if (priv->lpi_irq != -ENXIO) {
+ if (priv->lpi_irq > 0) {
ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
dev->name, dev);
if (unlikely(ret < 0)) {
@@ -1813,7 +1813,7 @@ static int stmmac_release(struct net_device *dev)
free_irq(dev->irq, dev);
if (priv->wol_irq != dev->irq)
free_irq(priv->wol_irq, dev);
- if (priv->lpi_irq != -ENXIO)
+ if (priv->lpi_irq > 0)
free_irq(priv->lpi_irq, dev);
/* Stop TX/RX DMA and clear the descriptors */
@@ -2212,27 +2212,6 @@ static void stmmac_tx_timeout(struct net_device *dev)
stmmac_tx_err(priv);
}
-/* Configuration changes (passed on by ifconfig) */
-static int stmmac_config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP) /* can't act on a running interface */
- return -EBUSY;
-
- /* Don't allow changing the I/O address */
- if (map->base_addr != dev->base_addr) {
- pr_warn("%s: can't change I/O address\n", dev->name);
- return -EOPNOTSUPP;
- }
-
- /* Don't allow changing the IRQ */
- if (map->irq != dev->irq) {
- pr_warn("%s: not change IRQ number %d\n", dev->name, dev->irq);
- return -EOPNOTSUPP;
- }
-
- return 0;
-}
-
/**
* stmmac_set_rx_mode - entry point for multicast addressing
* @dev : pointer to the device structure
@@ -2598,7 +2577,6 @@ static const struct net_device_ops stmmac_netdev_ops = {
.ndo_set_rx_mode = stmmac_set_rx_mode,
.ndo_tx_timeout = stmmac_tx_timeout,
.ndo_do_ioctl = stmmac_ioctl,
- .ndo_set_config = stmmac_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = stmmac_poll_controller,
#endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
index a468eb1..a5b1e1b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
@@ -205,10 +205,13 @@ int stmmac_mdio_register(struct net_device *ndev)
if (new_bus == NULL)
return -ENOMEM;
- if (mdio_bus_data->irqs)
+ if (mdio_bus_data->irqs) {
irqlist = mdio_bus_data->irqs;
- else
+ } else {
+ for (addr = 0; addr < PHY_MAX_ADDR; addr++)
+ priv->mii_irq[addr] = PHY_POLL;
irqlist = priv->mii_irq;
+ }
#ifdef CONFIG_OF
if (priv->device->of_node)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 46aef510..ea7a65b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -237,10 +237,12 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
/* Get the MAC information */
priv->dev->irq = platform_get_irq_byname(pdev, "macirq");
- if (priv->dev->irq == -ENXIO) {
- pr_err("%s: ERROR: MAC IRQ configuration "
- "information not found\n", __func__);
- return -ENXIO;
+ if (priv->dev->irq < 0) {
+ if (priv->dev->irq != -EPROBE_DEFER) {
+ netdev_err(priv->dev,
+ "MAC IRQ configuration information not found\n");
+ }
+ return priv->dev->irq;
}
/*
@@ -252,10 +254,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
* so the driver will continue to use the mac irq (ndev->irq)
*/
priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq");
- if (priv->wol_irq == -ENXIO)
+ if (priv->wol_irq < 0) {
+ if (priv->wol_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
priv->wol_irq = priv->dev->irq;
+ }
priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi");
+ if (priv->lpi_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
platform_set_drvdata(pdev, priv->dev);
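The probe hunks above stop comparing IRQ lookups against -ENXIO specifically: any negative value from platform_get_irq_byname() is propagated, and -EPROBE_DEFER in particular is passed up quietly so the probe is retried once the interrupt provider appears. The shape of that handling, assuming a "macirq" resource name as in stmmac:

#include <linux/platform_device.h>

static int demo_get_mac_irq(struct platform_device *pdev)
{
	int irq = platform_get_irq_byname(pdev, "macirq");

	if (irq < 0) {
		/* stay quiet on probe deferral; it is not an error yet */
		if (irq != -EPROBE_DEFER)
			dev_err(&pdev->dev, "macirq not found\n");
		return irq;
	}

	return irq;
}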
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 2ead877..38da73a 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2413,7 +2413,7 @@ static void bdx_set_ethtool_ops(struct net_device *netdev)
.get_ethtool_stats = bdx_get_ethtool_stats,
};
- SET_ETHTOOL_OPS(netdev, &bdx_ethtool_ops);
+ netdev->ethtool_ops = &bdx_ethtool_ops;
}
/**
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 73f74f3..7399a52 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -313,19 +313,6 @@ static int mii_irqs[PHY_MAX_ADDR] = { PHY_POLL, };
static struct mii_bus *cpmac_mii;
-static int cpmac_config(struct net_device *dev, struct ifmap *map)
-{
- if (dev->flags & IFF_UP)
- return -EBUSY;
-
- /* Don't allow changing the I/O address */
- if (map->base_addr != dev->base_addr)
- return -EOPNOTSUPP;
-
- /* ignore other fields */
- return 0;
-}
-
static void cpmac_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
@@ -1100,7 +1087,6 @@ static const struct net_device_ops cpmac_netdev_ops = {
.ndo_tx_timeout = cpmac_tx_timeout,
.ndo_set_rx_mode = cpmac_set_multicast_list,
.ndo_do_ioctl = cpmac_ioctl,
- .ndo_set_config = cpmac_config,
.ndo_change_mtu = eth_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index 148da9a..aa8bf45 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -29,6 +29,8 @@
#define AM33XX_GMII_SEL_RMII2_IO_CLK_EN BIT(7)
#define AM33XX_GMII_SEL_RMII1_IO_CLK_EN BIT(6)
+#define GMII_SEL_MODE_MASK 0x3
+
struct cpsw_phy_sel_priv {
struct device *dev;
u32 __iomem *gmii_sel;
@@ -65,7 +67,7 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
break;
};
- mask = 0x3 << (slave * 2) | BIT(slave + 6);
+ mask = GMII_SEL_MODE_MASK << (slave * 2) | BIT(slave + 6);
mode <<= slave * 2;
if (priv->rmii_clock_external) {
@@ -81,6 +83,55 @@ static void cpsw_gmii_sel_am3352(struct cpsw_phy_sel_priv *priv,
writel(reg, priv->gmii_sel);
}
+static void cpsw_gmii_sel_dra7xx(struct cpsw_phy_sel_priv *priv,
+ phy_interface_t phy_mode, int slave)
+{
+ u32 reg;
+ u32 mask;
+ u32 mode = 0;
+
+ reg = readl(priv->gmii_sel);
+
+ switch (phy_mode) {
+ case PHY_INTERFACE_MODE_RMII:
+ mode = AM33XX_GMII_SEL_MODE_RMII;
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ mode = AM33XX_GMII_SEL_MODE_RGMII;
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ default:
+ mode = AM33XX_GMII_SEL_MODE_MII;
+ break;
+ };
+
+ switch (slave) {
+ case 0:
+ mask = GMII_SEL_MODE_MASK;
+ break;
+ case 1:
+ mask = GMII_SEL_MODE_MASK << 4;
+ mode <<= 4;
+ break;
+ default:
+ dev_err(priv->dev, "invalid slave number...\n");
+ return;
+ }
+
+ if (priv->rmii_clock_external)
+ dev_err(priv->dev, "RMII External clock is not supported\n");
+
+ reg &= ~mask;
+ reg |= mode;
+
+ writel(reg, priv->gmii_sel);
+}
+
static struct platform_driver cpsw_phy_sel_driver;
static int match(struct device *dev, void *data)
{
@@ -112,6 +163,14 @@ static const struct of_device_id cpsw_phy_sel_id_table[] = {
.compatible = "ti,am3352-cpsw-phy-sel",
.data = &cpsw_gmii_sel_am3352,
},
+ {
+ .compatible = "ti,dra7xx-cpsw-phy-sel",
+ .data = &cpsw_gmii_sel_dra7xx,
+ },
+ {
+ .compatible = "ti,am43xx-cpsw-phy-sel",
+ .data = &cpsw_gmii_sel_am3352,
+ },
{}
};
MODULE_DEVICE_TABLE(of, cpsw_phy_sel_id_table);
@@ -132,6 +191,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ priv->dev = &pdev->dev;
priv->cpsw_phy_sel = of_id->data;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gmii-sel");
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c331b7e..ff380da 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -143,13 +143,13 @@ do { \
u32 i; \
for (i = 0; i < priv->num_irqs; i++) \
enable_irq(priv->irqs_table[i]); \
- } while (0);
+ } while (0)
#define cpsw_disable_irq(priv) \
do { \
u32 i; \
for (i = 0; i < priv->num_irqs; i++) \
disable_irq_nosync(priv->irqs_table[i]); \
- } while (0);
+ } while (0)
#define cpsw_slave_index(priv) \
((priv->data.dual_emac) ? priv->emac_port : \
@@ -248,20 +248,31 @@ struct cpsw_ss_regs {
#define TS_131 (1<<11) /* Time Sync Dest IP Addr 131 enable */
#define TS_130 (1<<10) /* Time Sync Dest IP Addr 130 enable */
#define TS_129 (1<<9) /* Time Sync Dest IP Addr 129 enable */
-#define TS_BIT8 (1<<8) /* ts_ttl_nonzero? */
+#define TS_TTL_NONZERO (1<<8) /* Time Sync Time To Live Non-zero enable */
+#define TS_ANNEX_F_EN (1<<6) /* Time Sync Annex F enable */
#define TS_ANNEX_D_EN (1<<4) /* Time Sync Annex D enable */
#define TS_LTYPE2_EN (1<<3) /* Time Sync LTYPE 2 enable */
#define TS_LTYPE1_EN (1<<2) /* Time Sync LTYPE 1 enable */
#define TS_TX_EN (1<<1) /* Time Sync Transmit Enable */
#define TS_RX_EN (1<<0) /* Time Sync Receive Enable */
-#define CTRL_TS_BITS \
- (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
- TS_ANNEX_D_EN | TS_LTYPE1_EN)
+#define CTRL_V2_TS_BITS \
+ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN)
+
+#define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN)
+#define CTRL_V2_RX_TS_BITS (CTRL_V2_TS_BITS | TS_RX_EN)
+
-#define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
-#define CTRL_TX_TS_BITS (CTRL_TS_BITS | TS_TX_EN)
-#define CTRL_RX_TS_BITS (CTRL_TS_BITS | TS_RX_EN)
+#define CTRL_V3_TS_BITS \
+ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\
+ TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\
+ TS_LTYPE1_EN)
+
+#define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN)
+#define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN)
+#define CTRL_V3_RX_TS_BITS (CTRL_V3_TS_BITS | TS_RX_EN)
/* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
#define TS_SEQ_ID_OFFSET_SHIFT (16) /* Time Sync Sequence ID Offset */
@@ -1376,13 +1387,27 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
slave = &priv->slaves[priv->data.active_slave];
ctrl = slave_read(slave, CPSW2_CONTROL);
- ctrl &= ~CTRL_ALL_TS_MASK;
+ switch (priv->version) {
+ case CPSW_VERSION_2:
+ ctrl &= ~CTRL_V2_ALL_TS_MASK;
- if (priv->cpts->tx_enable)
- ctrl |= CTRL_TX_TS_BITS;
+ if (priv->cpts->tx_enable)
+ ctrl |= CTRL_V2_TX_TS_BITS;
- if (priv->cpts->rx_enable)
- ctrl |= CTRL_RX_TS_BITS;
+ if (priv->cpts->rx_enable)
+ ctrl |= CTRL_V2_RX_TS_BITS;
+ break;
+ case CPSW_VERSION_3:
+ default:
+ ctrl &= ~CTRL_V3_ALL_TS_MASK;
+
+ if (priv->cpts->tx_enable)
+ ctrl |= CTRL_V3_TX_TS_BITS;
+
+ if (priv->cpts->rx_enable)
+ ctrl |= CTRL_V3_RX_TS_BITS;
+ break;
+ }
mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;
@@ -1398,7 +1423,8 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
struct hwtstamp_config cfg;
if (priv->version != CPSW_VERSION_1 &&
- priv->version != CPSW_VERSION_2)
+ priv->version != CPSW_VERSION_2 &&
+ priv->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
@@ -1443,6 +1469,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
cpsw_hwtstamp_v1(priv);
break;
case CPSW_VERSION_2:
+ case CPSW_VERSION_3:
cpsw_hwtstamp_v2(priv);
break;
default:
@@ -1459,7 +1486,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
struct hwtstamp_config cfg;
if (priv->version != CPSW_VERSION_1 &&
- priv->version != CPSW_VERSION_2)
+ priv->version != CPSW_VERSION_2 &&
+ priv->version != CPSW_VERSION_3)
return -EOPNOTSUPP;
cfg.flags = 0;
@@ -1780,25 +1808,25 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
return -EINVAL;
if (of_property_read_u32(node, "slaves", &prop)) {
- pr_err("Missing slaves property in the DT.\n");
+ dev_err(&pdev->dev, "Missing slaves property in the DT.\n");
return -EINVAL;
}
data->slaves = prop;
if (of_property_read_u32(node, "active_slave", &prop)) {
- pr_err("Missing active_slave property in the DT.\n");
+ dev_err(&pdev->dev, "Missing active_slave property in the DT.\n");
return -EINVAL;
}
data->active_slave = prop;
if (of_property_read_u32(node, "cpts_clock_mult", &prop)) {
- pr_err("Missing cpts_clock_mult property in the DT.\n");
+ dev_err(&pdev->dev, "Missing cpts_clock_mult property in the DT.\n");
return -EINVAL;
}
data->cpts_clock_mult = prop;
if (of_property_read_u32(node, "cpts_clock_shift", &prop)) {
- pr_err("Missing cpts_clock_shift property in the DT.\n");
+ dev_err(&pdev->dev, "Missing cpts_clock_shift property in the DT.\n");
return -EINVAL;
}
data->cpts_clock_shift = prop;
@@ -1810,31 +1838,31 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
return -ENOMEM;
if (of_property_read_u32(node, "cpdma_channels", &prop)) {
- pr_err("Missing cpdma_channels property in the DT.\n");
+ dev_err(&pdev->dev, "Missing cpdma_channels property in the DT.\n");
return -EINVAL;
}
data->channels = prop;
if (of_property_read_u32(node, "ale_entries", &prop)) {
- pr_err("Missing ale_entries property in the DT.\n");
+ dev_err(&pdev->dev, "Missing ale_entries property in the DT.\n");
return -EINVAL;
}
data->ale_entries = prop;
if (of_property_read_u32(node, "bd_ram_size", &prop)) {
- pr_err("Missing bd_ram_size property in the DT.\n");
+ dev_err(&pdev->dev, "Missing bd_ram_size property in the DT.\n");
return -EINVAL;
}
data->bd_ram_size = prop;
if (of_property_read_u32(node, "rx_descs", &prop)) {
- pr_err("Missing rx_descs property in the DT.\n");
+ dev_err(&pdev->dev, "Missing rx_descs property in the DT.\n");
return -EINVAL;
}
data->rx_descs = prop;
if (of_property_read_u32(node, "mac_control", &prop)) {
- pr_err("Missing mac_control property in the DT.\n");
+ dev_err(&pdev->dev, "Missing mac_control property in the DT.\n");
return -EINVAL;
}
data->mac_control = prop;
@@ -1848,7 +1876,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
ret = of_platform_populate(node, NULL, NULL, &pdev->dev);
/* We do not want to force this, as in some cases it may not have a child */
if (ret)
- pr_warn("Doesn't have any child node\n");
+ dev_warn(&pdev->dev, "Doesn't have any child node\n");
for_each_child_of_node(node, slave_node) {
struct cpsw_slave_data *slave_data = data->slave_data + i;
@@ -1865,7 +1893,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
parp = of_get_property(slave_node, "phy_id", &lenp);
if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
- pr_err("Missing slave[%d] phy_id property\n", i);
+ dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
return -EINVAL;
}
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
@@ -1885,18 +1913,18 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
slave_data->phy_if = of_get_phy_mode(slave_node);
if (slave_data->phy_if < 0) {
- pr_err("Missing or malformed slave[%d] phy-mode property\n",
- i);
+ dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
+ i);
return slave_data->phy_if;
}
if (data->dual_emac) {
if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
&prop)) {
- pr_err("Missing dual_emac_res_vlan in DT.\n");
+ dev_err(&pdev->dev, "Missing dual_emac_res_vlan in DT.\n");
slave_data->dual_emac_res_vlan = i+1;
- pr_err("Using %d as Reserved VLAN for %d slave\n",
- slave_data->dual_emac_res_vlan, i);
+ dev_err(&pdev->dev, "Using %d as Reserved VLAN for %d slave\n",
+ slave_data->dual_emac_res_vlan, i);
} else {
slave_data->dual_emac_res_vlan = prop;
}
@@ -1920,7 +1948,7 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
ndev = alloc_etherdev(sizeof(struct cpsw_priv));
if (!ndev) {
- pr_err("cpsw: error allocating net_device\n");
+ dev_err(&pdev->dev, "cpsw: error allocating net_device\n");
return -ENOMEM;
}
@@ -1936,10 +1964,10 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
if (is_valid_ether_addr(data->slave_data[1].mac_addr)) {
memcpy(priv_sl2->mac_addr, data->slave_data[1].mac_addr,
ETH_ALEN);
- pr_info("cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
+ dev_info(&pdev->dev, "cpsw: Detected MACID = %pM\n", priv_sl2->mac_addr);
} else {
random_ether_addr(priv_sl2->mac_addr);
- pr_info("cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
+ dev_info(&pdev->dev, "cpsw: Random MACID = %pM\n", priv_sl2->mac_addr);
}
memcpy(ndev->dev_addr, priv_sl2->mac_addr, ETH_ALEN);
@@ -1970,14 +1998,14 @@ static int cpsw_probe_dual_emac(struct platform_device *pdev,
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
netif_napi_add(ndev, &priv_sl2->napi, cpsw_poll, CPSW_POLL_WEIGHT);
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
ret = register_netdev(ndev);
if (ret) {
- pr_err("cpsw: error registering net device\n");
+ dev_err(&pdev->dev, "cpsw: error registering net device\n");
free_netdev(ndev);
ret = -ENODEV;
}
@@ -1999,7 +2027,7 @@ static int cpsw_probe(struct platform_device *pdev)
ndev = alloc_etherdev(sizeof(struct cpsw_priv));
if (!ndev) {
- pr_err("error allocating net_device\n");
+ dev_err(&pdev->dev, "error allocating net_device\n");
return -ENOMEM;
}
@@ -2014,7 +2042,7 @@ static int cpsw_probe(struct platform_device *pdev)
priv->cpts = devm_kzalloc(&pdev->dev, sizeof(struct cpts), GFP_KERNEL);
priv->irq_enabled = true;
if (!priv->cpts) {
- pr_err("error allocating cpts\n");
+ dev_err(&pdev->dev, "error allocating cpts\n");
goto clean_ndev_ret;
}
@@ -2027,7 +2055,7 @@ static int cpsw_probe(struct platform_device *pdev)
pinctrl_pm_select_default_state(&pdev->dev);
if (cpsw_probe_dt(&priv->data, pdev)) {
- pr_err("cpsw: platform data missing\n");
+ dev_err(&pdev->dev, "cpsw: platform data missing\n");
ret = -ENODEV;
goto clean_runtime_disable_ret;
}
@@ -2035,10 +2063,10 @@ static int cpsw_probe(struct platform_device *pdev)
if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
- pr_info("Detected MACID = %pM\n", priv->mac_addr);
+ dev_info(&pdev->dev, "Detected MACID = %pM\n", priv->mac_addr);
} else {
eth_random_addr(priv->mac_addr);
- pr_info("Random MACID = %pM\n", priv->mac_addr);
+ dev_info(&pdev->dev, "Random MACID = %pM\n", priv->mac_addr);
}
memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
@@ -2199,7 +2227,7 @@ static int cpsw_probe(struct platform_device *pdev)
ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->netdev_ops = &cpsw_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
+ ndev->ethtool_ops = &cpsw_ethtool_ops;
netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
/* register the network device */
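
The macro change at the top of cpsw.c is the textbook do { } while (0) idiom: the old trailing semicolon made the expansion two statements, so wrapping cpsw_enable_irq() or cpsw_disable_irq() in an if/else would fail to compile. A standalone reduction, with illustrative names:

    /* Why a trailing ';' inside a do/while macro is a bug. */
    #include <stdio.h>

    /* Broken form: expands to "do { ... } while (0);;", so the extra
     * empty statement detaches a following "else" from its "if".
     */
    /* #define LOG_BROKEN(msg) do { puts(msg); } while (0); */

    /* Correct form: the caller supplies the ';', and the macro acts as
     * exactly one statement wherever a statement is allowed.
     */
    #define LOG(msg) do { puts(msg); } while (0)

    int main(void)
    {
        int up = 1;

        if (up)
            LOG("link up");
        else                        /* would not compile with LOG_BROKEN */
            LOG("link down");
        return 0;
    }
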
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index 2435139..6b56f85 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -236,13 +236,11 @@ static void cpts_overflow_check(struct work_struct *work)
schedule_delayed_work(&cpts->overflow_work, CPTS_OVERFLOW_PERIOD);
}
-#define CPTS_REF_CLOCK_NAME "cpsw_cpts_rft_clk"
-
-static void cpts_clk_init(struct cpts *cpts)
+static void cpts_clk_init(struct device *dev, struct cpts *cpts)
{
- cpts->refclk = clk_get(NULL, CPTS_REF_CLOCK_NAME);
+ cpts->refclk = devm_clk_get(dev, "cpts");
if (IS_ERR(cpts->refclk)) {
- pr_err("Failed to clk_get %s\n", CPTS_REF_CLOCK_NAME);
+ dev_err(dev, "Failed to get cpts refclk\n");
cpts->refclk = NULL;
return;
}
@@ -252,7 +250,6 @@ static void cpts_clk_init(struct cpts *cpts)
static void cpts_clk_release(struct cpts *cpts)
{
clk_disable(cpts->refclk);
- clk_put(cpts->refclk);
}
static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
@@ -390,7 +387,7 @@ int cpts_register(struct device *dev, struct cpts *cpts,
for (i = 0; i < CPTS_MAX_EVENTS; i++)
list_add(&cpts->pool_data[i].list, &cpts->pool);
- cpts_clk_init(cpts);
+ cpts_clk_init(dev, cpts);
cpts_write32(cpts, CPTS_EN, control);
cpts_write32(cpts, TS_PEND_EN, int_enable);
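
cpts_clk_init() now takes the struct device and calls devm_clk_get(), tying the clock reference's lifetime to the device; that is why the clk_put() in cpts_clk_release() is dropped rather than moved. A minimal sketch of the managed-clock pattern, assuming kernel context (the driver itself uses clk_enable(); clk_prepare_enable() here is the modern spelling):

    #include <linux/clk.h>
    #include <linux/device.h>
    #include <linux/err.h>

    static int demo_clk_setup(struct device *dev)
    {
        struct clk *refclk;

        refclk = devm_clk_get(dev, "cpts"); /* put automatically on unbind */
        if (IS_ERR(refclk))
            return PTR_ERR(refclk);         /* nothing to unwind */

        return clk_prepare_enable(refclk);
    }
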
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 88ef270..4a000f6 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -158,9 +158,9 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
int bitmap_size;
struct cpdma_desc_pool *pool;
- pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+ pool = devm_kzalloc(dev, sizeof(*pool), GFP_KERNEL);
if (!pool)
- return NULL;
+ goto fail;
spin_lock_init(&pool->lock);
@@ -170,7 +170,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
pool->num_desc = size / pool->desc_size;
bitmap_size = (pool->num_desc / BITS_PER_LONG) * sizeof(long);
- pool->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ pool->bitmap = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
if (!pool->bitmap)
goto fail;
@@ -187,10 +187,7 @@ cpdma_desc_pool_create(struct device *dev, u32 phys, u32 hw_addr,
if (pool->iomap)
return pool;
-
fail:
- kfree(pool->bitmap);
- kfree(pool);
return NULL;
}
@@ -203,7 +200,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
spin_lock_irqsave(&pool->lock, flags);
WARN_ON(pool->used_desc);
- kfree(pool->bitmap);
if (pool->cpumap) {
dma_free_coherent(pool->dev, pool->mem_size, pool->cpumap,
pool->phys);
@@ -211,7 +207,6 @@ static void cpdma_desc_pool_destroy(struct cpdma_desc_pool *pool)
iounmap(pool->iomap);
}
spin_unlock_irqrestore(&pool->lock, flags);
- kfree(pool);
}
static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
@@ -276,7 +271,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
{
struct cpdma_ctlr *ctlr;
- ctlr = kzalloc(sizeof(*ctlr), GFP_KERNEL);
+ ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
if (!ctlr)
return NULL;
@@ -290,10 +285,8 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
ctlr->params.desc_hw_addr,
ctlr->params.desc_mem_size,
ctlr->params.desc_align);
- if (!ctlr->pool) {
- kfree(ctlr);
+ if (!ctlr->pool)
return NULL;
- }
if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
ctlr->num_chan = CPDMA_MAX_CHANNELS;
@@ -468,7 +461,6 @@ int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
cpdma_desc_pool_destroy(ctlr->pool);
spin_unlock_irqrestore(&ctlr->lock, flags);
- kfree(ctlr);
return ret;
}
EXPORT_SYMBOL_GPL(cpdma_ctlr_destroy);
@@ -507,21 +499,22 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
cpdma_handler_fn handler)
{
struct cpdma_chan *chan;
- int ret, offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
+ int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
unsigned long flags;
if (__chan_linear(chan_num) >= ctlr->num_chan)
return NULL;
- ret = -ENOMEM;
- chan = kzalloc(sizeof(*chan), GFP_KERNEL);
+ chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
if (!chan)
- goto err_chan_alloc;
+ return ERR_PTR(-ENOMEM);
spin_lock_irqsave(&ctlr->lock, flags);
- ret = -EBUSY;
- if (ctlr->channels[chan_num])
- goto err_chan_busy;
+ if (ctlr->channels[chan_num]) {
+ spin_unlock_irqrestore(&ctlr->lock, flags);
+ devm_kfree(ctlr->dev, chan);
+ return ERR_PTR(-EBUSY);
+ }
chan->ctlr = ctlr;
chan->state = CPDMA_STATE_IDLE;
@@ -551,12 +544,6 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
ctlr->channels[chan_num] = chan;
spin_unlock_irqrestore(&ctlr->lock, flags);
return chan;
-
-err_chan_busy:
- spin_unlock_irqrestore(&ctlr->lock, flags);
- kfree(chan);
-err_chan_alloc:
- return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(cpdma_chan_create);
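
cpdma_chan_create() now encodes errno values directly into the returned pointer with ERR_PTR(), replacing the ret/goto ladder; callers distinguish failures with IS_ERR()/PTR_ERR(). The trick works because the last page of the address space can never hold a valid kernel pointer. A userspace re-creation of the encoding, for illustration only:

    #include <errno.h>
    #include <stdio.h>

    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *chan_create(int busy)
    {
        if (busy)
            return ERR_PTR(-EBUSY);     /* mirrors cpdma_chan_create() */
        return "a real channel";
    }

    int main(void)
    {
        void *chan = chan_create(1);

        if (IS_ERR(chan))
            printf("create failed: %ld\n", PTR_ERR(chan)); /* -16 */
        return 0;
    }
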
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 8f0e69c..35a139e 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1567,7 +1567,6 @@ static int emac_dev_open(struct net_device *ndev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
res_num))) {
for (irq_num = res->start; irq_num <= res->end; irq_num++) {
- dev_err(emac_dev, "Request IRQ %d\n", irq_num);
if (request_irq(irq_num, emac_irq, 0, ndev->name,
ndev)) {
dev_err(emac_dev,
@@ -1865,7 +1864,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
struct emac_priv *priv;
unsigned long hw_ram_addr;
struct emac_platform_data *pdata;
- struct device *emac_dev;
struct cpdma_params dma_params;
struct clk *emac_clk;
unsigned long emac_bus_frequency;
@@ -1911,7 +1909,6 @@ static int davinci_emac_probe(struct platform_device *pdev)
priv->coal_intvl = 0;
priv->bus_freq_mhz = (u32)(emac_bus_frequency / 1000000);
- emac_dev = &ndev->dev;
/* Get EMAC platform data */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->emac_base_phys = res->start + pdata->ctrl_reg_offset;
@@ -1930,7 +1927,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
memset(&dma_params, 0, sizeof(dma_params));
- dma_params.dev = emac_dev;
+ dma_params.dev = &pdev->dev;
dma_params.dmaregs = priv->emac_base;
dma_params.rxthresh = priv->emac_base + 0x120;
dma_params.rxfree = priv->emac_base + 0x140;
@@ -1980,7 +1977,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
}
ndev->netdev_ops = &emac_netdev_ops;
- SET_ETHTOOL_OPS(ndev, &ethtool_ops);
+ ndev->ethtool_ops = &ethtool_ops;
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
/* register the network device */
@@ -1994,7 +1991,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
if (netif_msg_probe(priv)) {
- dev_notice(emac_dev, "DaVinci EMAC Probe found device "\
+ dev_notice(&pdev->dev, "DaVinci EMAC Probe found device "
"(regs: %p, irq: %d)\n",
(void *)priv->emac_base_phys, ndev->irq);
}
diff --git a/drivers/net/ethernet/ti/davinci_mdio.c b/drivers/net/ethernet/ti/davinci_mdio.c
index 0cca9de..735dc53 100644
--- a/drivers/net/ethernet/ti/davinci_mdio.c
+++ b/drivers/net/ethernet/ti/davinci_mdio.c
@@ -303,7 +303,7 @@ static int davinci_mdio_probe_dt(struct mdio_platform_data *data,
return -EINVAL;
if (of_property_read_u32(node, "bus_freq", &prop)) {
- pr_err("Missing bus_freq property in the DT.\n");
+ dev_err(&pdev->dev, "Missing bus_freq property in the DT.\n");
return -EINVAL;
}
data->bus_freq = prop;
@@ -321,15 +321,14 @@ static int davinci_mdio_probe(struct platform_device *pdev)
struct phy_device *phy;
int ret, addr;
- data = kzalloc(sizeof(*data), GFP_KERNEL);
+ data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
- data->bus = mdiobus_alloc();
+ data->bus = devm_mdiobus_alloc(dev);
if (!data->bus) {
dev_err(dev, "failed to alloc mii bus\n");
- ret = -ENOMEM;
- goto bail_out;
+ return -ENOMEM;
}
if (dev->of_node) {
@@ -349,12 +348,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
data->bus->parent = dev;
data->bus->priv = data;
- /* Select default pin state */
- pinctrl_pm_select_default_state(&pdev->dev);
-
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
- data->clk = clk_get(&pdev->dev, "fck");
+ data->clk = devm_clk_get(dev, "fck");
if (IS_ERR(data->clk)) {
dev_err(dev, "failed to get device clock\n");
ret = PTR_ERR(data->clk);
@@ -367,24 +363,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
spin_lock_init(&data->lock);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(dev, "could not find register map resource\n");
- ret = -ENOENT;
- goto bail_out;
- }
-
- res = devm_request_mem_region(dev, res->start, resource_size(res),
- dev_name(dev));
- if (!res) {
- dev_err(dev, "could not allocate register map resource\n");
- ret = -ENXIO;
- goto bail_out;
- }
-
- data->regs = devm_ioremap_nocache(dev, res->start, resource_size(res));
- if (!data->regs) {
- dev_err(dev, "could not map mdio registers\n");
- ret = -ENOMEM;
+ data->regs = devm_ioremap_resource(dev, res);
+ if (IS_ERR(data->regs)) {
+ ret = PTR_ERR(data->regs);
goto bail_out;
}
@@ -406,16 +387,9 @@ static int davinci_mdio_probe(struct platform_device *pdev)
return 0;
bail_out:
- if (data->bus)
- mdiobus_free(data->bus);
-
- if (data->clk)
- clk_put(data->clk);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(data);
-
return ret;
}
@@ -423,18 +397,12 @@ static int davinci_mdio_remove(struct platform_device *pdev)
{
struct davinci_mdio_data *data = platform_get_drvdata(pdev);
- if (data->bus) {
+ if (data->bus)
mdiobus_unregister(data->bus);
- mdiobus_free(data->bus);
- }
- if (data->clk)
- clk_put(data->clk);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
- kfree(data);
-
return 0;
}
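
The mdio probe rewrite is the densest example of the devm_* conversion in this series: devm_kzalloc(), devm_mdiobus_alloc(), devm_clk_get() and devm_ioremap_resource() all release their resources automatically on unbind or probe failure, which is what lets the bail_out ladder and most of davinci_mdio_remove() shrink away. devm_ioremap_resource() alone replaces the resource NULL check, the request_mem_region() call, and the ioremap(), plus all three unwind paths. A minimal probe fragment showing the consolidated mapping, assuming kernel context:

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int demo_map_regs(struct platform_device *pdev, void __iomem **regs)
    {
        struct resource *res;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        *regs = devm_ioremap_resource(&pdev->dev, res); /* handles res == NULL */
        if (IS_ERR(*regs))
            return PTR_ERR(*regs);

        return 0;
    }
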
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 449011b..14389f8 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2192,7 +2192,6 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
{
int ret;
int i;
- int nz_addr = 0;
struct net_device *dev;
struct tile_net_priv *priv;
@@ -2212,7 +2211,6 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
/* Initialize "priv". */
priv = netdev_priv(dev);
- memset(priv, 0, sizeof(*priv));
priv->dev = dev;
priv->channel = -1;
priv->loopify_channel = -1;
@@ -2223,15 +2221,10 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
* be done before the device is opened. If the MAC is all zeroes,
* we use a random address, since we're probably on the simulator.
*/
- for (i = 0; i < 6; i++)
- nz_addr |= mac[i];
-
- if (nz_addr) {
- memcpy(dev->dev_addr, mac, ETH_ALEN);
- dev->addr_len = 6;
- } else {
+ if (!is_zero_ether_addr(mac))
+ ether_addr_copy(dev->dev_addr, mac);
+ else
eth_hw_addr_random(dev);
- }
/* Register the network device. */
ret = register_netdev(dev);
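
The tilegx cleanup swaps a hand-rolled loop for is_zero_ether_addr() and ether_addr_copy() from <linux/etherdevice.h>, and drops a memset() of memory that alloc_etherdev() already zeroes. A standalone sketch of what the helper tests; the kernel version is semantically the same check, with alignment-optimized variants:

    #include <stdio.h>
    #include <string.h>

    #define ETH_ALEN 6

    static int is_zero_ether_addr(const unsigned char *addr)
    {
        int i;

        for (i = 0; i < ETH_ALEN; i++)
            if (addr[i])
                return 0;
        return 1;
    }

    int main(void)
    {
        unsigned char mac[ETH_ALEN] = { 0 };
        unsigned char dst[ETH_ALEN];

        if (!is_zero_ether_addr(mac))
            memcpy(dst, mac, ETH_ALEN);  /* what ether_addr_copy() does */
        else
            puts("all-zero MAC: fall back to a random address");
        return 0;
    }
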
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index d899d00..bb79928 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1561,7 +1561,7 @@ static struct gelic_card *gelic_alloc_card_net(struct net_device **netdev)
* alloc netdev
*/
*netdev = alloc_etherdev(sizeof(struct gelic_port));
- if (!netdev) {
+ if (!*netdev) {
kfree(card->unalign);
return NULL;
}
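
The gelic change is a one-character fix with real consequences: alloc_etherdev() stores its result through the netdev out-parameter, so the failure test must dereference it. Testing the parameter itself, which is never NULL here, meant an allocation failure went undetected. A userspace reduction of the bug:

    #include <stdio.h>
    #include <stdlib.h>

    static int alloc_card(char **netdev)
    {
        *netdev = malloc(32);           /* stands in for alloc_etherdev() */

        if (!*netdev)                   /* correct: tests the allocation */
            return -1;
        /* "if (!netdev)" would test the out-parameter itself, which is
         * non-NULL here, silently ignoring allocation failure.
         */
        return 0;
    }

    int main(void)
    {
        char *dev;

        if (alloc_card(&dev) == 0) {
            puts("allocated");
            free(dev);
        }
        return 0;
    }
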
diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig
index 8a049a2..f66ddae 100644
--- a/drivers/net/ethernet/via/Kconfig
+++ b/drivers/net/ethernet/via/Kconfig
@@ -19,7 +19,7 @@ if NET_VENDOR_VIA
config VIA_RHINE
tristate "VIA Rhine support"
- depends on PCI
+ depends on (PCI || USE_OF)
select CRC32
select MII
---help---
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index f61dc2b..2d72f96 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -94,6 +94,10 @@ static const int multicast_filter_limit = 32;
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -116,13 +120,6 @@ static const int multicast_filter_limit = 32;
static const char version[] =
"v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
-/* This driver was written to use PCI memory space. Some early versions
- of the Rhine may only work correctly with I/O space accesses. */
-#ifdef CONFIG_VIA_RHINE_MMIO
-#define USE_MMIO
-#else
-#endif
-
MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
MODULE_LICENSE("GPL");
@@ -260,6 +257,12 @@ enum rhine_quirks {
rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
rqRhineI = 0x0100, /* See comment below */
+ rqIntPHY = 0x0200, /* Integrated PHY */
+ rqMgmt = 0x0400, /* Management adapter */
+ rqNeedEnMMIO = 0x0800, /* Whether the core needs to be
+ * switched from PIO mode to MMIO
+ * (only applies to PCI)
+ */
};
/*
* rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
@@ -279,6 +282,15 @@ static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
};
MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
+/* OpenFirmware identifiers for platform-bus devices
+ * The .data field is currently only used to store quirks
+ */
+static u32 vt8500_quirks = rqWOL | rqForceReset | rq6patterns;
+static struct of_device_id rhine_of_tbl[] = {
+ { .compatible = "via,vt8500-rhine", .data = &vt8500_quirks },
+ { } /* terminate list */
+};
+MODULE_DEVICE_TABLE(of, rhine_of_tbl);
/* Offsets to the device registers. */
enum register_offsets {
@@ -338,13 +350,11 @@ enum bcr1_bits {
BCR1_MED1=0x80, /* for VT6102 */
};
-#ifdef USE_MMIO
/* Registers we check that mmio and reg are the same. */
static const int mmio_verify_registers[] = {
RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
0
};
-#endif
/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
@@ -446,7 +456,7 @@ struct rhine_private {
unsigned char *tx_bufs;
dma_addr_t tx_bufs_dma;
- struct pci_dev *pdev;
+ int irq;
long pioaddr;
struct net_device *dev;
struct napi_struct napi;
@@ -649,20 +659,46 @@ static void rhine_chip_reset(struct net_device *dev)
"failed" : "succeeded");
}
-#ifdef USE_MMIO
static void enable_mmio(long pioaddr, u32 quirks)
{
int n;
- if (quirks & rqRhineI) {
- /* More recent docs say that this bit is reserved ... */
- n = inb(pioaddr + ConfigA) | 0x20;
- outb(n, pioaddr + ConfigA);
- } else {
- n = inb(pioaddr + ConfigD) | 0x80;
- outb(n, pioaddr + ConfigD);
+
+ if (quirks & rqNeedEnMMIO) {
+ if (quirks & rqRhineI) {
+ /* More recent docs say that this bit is reserved */
+ n = inb(pioaddr + ConfigA) | 0x20;
+ outb(n, pioaddr + ConfigA);
+ } else {
+ n = inb(pioaddr + ConfigD) | 0x80;
+ outb(n, pioaddr + ConfigD);
+ }
}
}
-#endif
+
+static inline int verify_mmio(struct device *hwdev,
+ long pioaddr,
+ void __iomem *ioaddr,
+ u32 quirks)
+{
+ if (quirks & rqNeedEnMMIO) {
+ int i = 0;
+
+ /* Check that selected MMIO registers match the PIO ones */
+ while (mmio_verify_registers[i]) {
+ int reg = mmio_verify_registers[i++];
+ unsigned char a = inb(pioaddr+reg);
+ unsigned char b = readb(ioaddr+reg);
+
+ if (a != b) {
+ dev_err(hwdev,
+ "MMIO do not match PIO [%02x] (%02x != %02x)\n",
+ reg, a, b);
+ return -EIO;
+ }
+ }
+ }
+ return 0;
+}
/*
* Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
@@ -682,14 +718,12 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
if (i > 512)
pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
-#ifdef USE_MMIO
/*
* Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
* MMIO. If reloading EEPROM was done first this could be avoided, but
* it is not known if that still works with the "win98-reboot" problem.
*/
enable_mmio(pioaddr, rp->quirks);
-#endif
/* Turn off EEPROM-controlled wake-up (magic packet) */
if (rp->quirks & rqWOL)
@@ -701,7 +735,7 @@ static void rhine_reload_eeprom(long pioaddr, struct net_device *dev)
static void rhine_poll(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
- const int irq = rp->pdev->irq;
+ const int irq = rp->irq;
disable_irq(irq);
rhine_interrupt(irq, dev);
@@ -846,7 +880,8 @@ static void rhine_hw_init(struct net_device *dev, long pioaddr)
msleep(5);
/* Reload EEPROM controlled bytes cleared by soft reset */
- rhine_reload_eeprom(pioaddr, dev);
+ if (dev_is_pci(dev->dev.parent))
+ rhine_reload_eeprom(pioaddr, dev);
}
static const struct net_device_ops rhine_netdev_ops = {
@@ -867,125 +902,37 @@ static const struct net_device_ops rhine_netdev_ops = {
#endif
};
-static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
+static int rhine_init_one_common(struct device *hwdev, u32 quirks,
+ long pioaddr, void __iomem *ioaddr, int irq)
{
struct net_device *dev;
struct rhine_private *rp;
- int i, rc;
- u32 quirks;
- long pioaddr;
- long memaddr;
- void __iomem *ioaddr;
- int io_size, phy_id;
+ int i, rc, phy_id;
const char *name;
-#ifdef USE_MMIO
- int bar = 1;
-#else
- int bar = 0;
-#endif
-
-/* when built into the kernel, we only print version if device is found */
-#ifndef MODULE
- pr_info_once("%s\n", version);
-#endif
-
- io_size = 256;
- phy_id = 0;
- quirks = 0;
- name = "Rhine";
- if (pdev->revision < VTunknown0) {
- quirks = rqRhineI;
- io_size = 128;
- }
- else if (pdev->revision >= VT6102) {
- quirks = rqWOL | rqForceReset;
- if (pdev->revision < VT6105) {
- name = "Rhine II";
- quirks |= rqStatusWBRace; /* Rhine-II exclusive */
- }
- else {
- phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
- if (pdev->revision >= VT6105_B0)
- quirks |= rq6patterns;
- if (pdev->revision < VT6105M)
- name = "Rhine III";
- else
- name = "Rhine III (Management Adapter)";
- }
- }
-
- rc = pci_enable_device(pdev);
- if (rc)
- goto err_out;
/* this should always be supported */
- rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+ rc = dma_set_mask(hwdev, DMA_BIT_MASK(32));
if (rc) {
- dev_err(&pdev->dev,
- "32-bit PCI DMA addresses not supported by the card!?\n");
- goto err_out_pci_disable;
- }
-
- /* sanity check */
- if ((pci_resource_len(pdev, 0) < io_size) ||
- (pci_resource_len(pdev, 1) < io_size)) {
- rc = -EIO;
- dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
- goto err_out_pci_disable;
+ dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n");
+ goto err_out;
}
- pioaddr = pci_resource_start(pdev, 0);
- memaddr = pci_resource_start(pdev, 1);
-
- pci_set_master(pdev);
-
dev = alloc_etherdev(sizeof(struct rhine_private));
if (!dev) {
rc = -ENOMEM;
- goto err_out_pci_disable;
+ goto err_out;
}
- SET_NETDEV_DEV(dev, &pdev->dev);
+ SET_NETDEV_DEV(dev, hwdev);
rp = netdev_priv(dev);
rp->dev = dev;
rp->quirks = quirks;
rp->pioaddr = pioaddr;
- rp->pdev = pdev;
+ rp->base = ioaddr;
+ rp->irq = irq;
rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
- rc = pci_request_regions(pdev, DRV_NAME);
- if (rc)
- goto err_out_free_netdev;
-
- ioaddr = pci_iomap(pdev, bar, io_size);
- if (!ioaddr) {
- rc = -EIO;
- dev_err(&pdev->dev,
- "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
- pci_name(pdev), io_size, memaddr);
- goto err_out_free_res;
- }
-
-#ifdef USE_MMIO
- enable_mmio(pioaddr, quirks);
-
- /* Check that selected MMIO registers match the PIO ones */
- i = 0;
- while (mmio_verify_registers[i]) {
- int reg = mmio_verify_registers[i++];
- unsigned char a = inb(pioaddr+reg);
- unsigned char b = readb(ioaddr+reg);
- if (a != b) {
- rc = -EIO;
- dev_err(&pdev->dev,
- "MMIO do not match PIO [%02x] (%02x != %02x)\n",
- reg, a, b);
- goto err_out_unmap;
- }
- }
-#endif /* USE_MMIO */
-
- rp->base = ioaddr;
+ phy_id = rp->quirks & rqIntPHY ? 1 : 0;
u64_stats_init(&rp->tx_stats.syncp);
u64_stats_init(&rp->rx_stats.syncp);
@@ -1030,7 +977,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rp->quirks & rqRhineI)
dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
- if (pdev->revision >= VT6105M)
+ if (rp->quirks & rqMgmt)
dev->features |= NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER;
@@ -1038,18 +985,21 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* dev->name not defined before register_netdev()! */
rc = register_netdev(dev);
if (rc)
- goto err_out_unmap;
+ goto err_out_free_netdev;
+
+ if (rp->quirks & rqRhineI)
+ name = "Rhine";
+ else if (rp->quirks & rqStatusWBRace)
+ name = "Rhine II";
+ else if (rp->quirks & rqMgmt)
+ name = "Rhine III (Management Adapter)";
+ else
+ name = "Rhine III";
netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
- name,
-#ifdef USE_MMIO
- memaddr,
-#else
- (long)ioaddr,
-#endif
- dev->dev_addr, pdev->irq);
+ name, (long)ioaddr, dev->dev_addr, rp->irq);
- pci_set_drvdata(pdev, dev);
+ dev_set_drvdata(hwdev, dev);
{
u16 mii_cmd;
@@ -1078,41 +1028,158 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
+err_out_free_netdev:
+ free_netdev(dev);
+err_out:
+ return rc;
+}
+
+static int rhine_init_one_pci(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+{
+ struct device *hwdev = &pdev->dev;
+ int rc;
+ long pioaddr, memaddr;
+ void __iomem *ioaddr;
+ int io_size = pdev->revision < VTunknown0 ? 128 : 256;
+
+/* This driver was written to use PCI memory space. Some early versions
+ * of the Rhine may only work correctly with I/O space accesses.
+ * TODO: determine for which revisions this is true and assign the flag
+ * in code as opposed to this Kconfig option (???)
+ */
+#ifdef CONFIG_VIA_RHINE_MMIO
+ u32 quirks = rqNeedEnMMIO;
+#else
+ u32 quirks = 0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+ pr_info_once("%s\n", version);
+#endif
+
+ rc = pci_enable_device(pdev);
+ if (rc)
+ goto err_out;
+
+ if (pdev->revision < VTunknown0) {
+ quirks |= rqRhineI;
+ } else if (pdev->revision >= VT6102) {
+ quirks |= rqWOL | rqForceReset;
+ if (pdev->revision < VT6105) {
+ quirks |= rqStatusWBRace;
+ } else {
+ quirks |= rqIntPHY;
+ if (pdev->revision >= VT6105_B0)
+ quirks |= rq6patterns;
+ if (pdev->revision >= VT6105M)
+ quirks |= rqMgmt;
+ }
+ }
+
+ /* sanity check */
+ if ((pci_resource_len(pdev, 0) < io_size) ||
+ (pci_resource_len(pdev, 1) < io_size)) {
+ rc = -EIO;
+ dev_err(hwdev, "Insufficient PCI resources, aborting\n");
+ goto err_out_pci_disable;
+ }
+
+ pioaddr = pci_resource_start(pdev, 0);
+ memaddr = pci_resource_start(pdev, 1);
+
+ pci_set_master(pdev);
+
+ rc = pci_request_regions(pdev, DRV_NAME);
+ if (rc)
+ goto err_out_pci_disable;
+
+ ioaddr = pci_iomap(pdev, (quirks & rqNeedEnMMIO ? 1 : 0), io_size);
+ if (!ioaddr) {
+ rc = -EIO;
+ dev_err(hwdev,
+ "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+ dev_name(hwdev), io_size, memaddr);
+ goto err_out_free_res;
+ }
+
+ enable_mmio(pioaddr, quirks);
+
+ rc = verify_mmio(hwdev, pioaddr, ioaddr, quirks);
+ if (rc)
+ goto err_out_unmap;
+
+ rc = rhine_init_one_common(&pdev->dev, quirks,
+ pioaddr, ioaddr, pdev->irq);
+ if (!rc)
+ return 0;
+
err_out_unmap:
pci_iounmap(pdev, ioaddr);
err_out_free_res:
pci_release_regions(pdev);
-err_out_free_netdev:
- free_netdev(dev);
err_out_pci_disable:
pci_disable_device(pdev);
err_out:
return rc;
}
+static int rhine_init_one_platform(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ const u32 *quirks;
+ int irq;
+ struct resource *res;
+ void __iomem *ioaddr;
+
+ match = of_match_device(rhine_of_tbl, &pdev->dev);
+ if (!match)
+ return -EINVAL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ ioaddr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ioaddr))
+ return PTR_ERR(ioaddr);
+
+ irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!irq)
+ return -EINVAL;
+
+ quirks = match->data;
+ if (!quirks)
+ return -EINVAL;
+
+ return rhine_init_one_common(&pdev->dev, *quirks,
+ (long)ioaddr, ioaddr, irq);
+}
+
static int alloc_ring(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
void *ring;
dma_addr_t ring_dma;
- ring = pci_alloc_consistent(rp->pdev,
- RX_RING_SIZE * sizeof(struct rx_desc) +
- TX_RING_SIZE * sizeof(struct tx_desc),
- &ring_dma);
+ ring = dma_alloc_coherent(hwdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ &ring_dma,
+ GFP_ATOMIC);
if (!ring) {
netdev_err(dev, "Could not allocate DMA memory\n");
return -ENOMEM;
}
if (rp->quirks & rqRhineI) {
- rp->tx_bufs = pci_alloc_consistent(rp->pdev,
- PKT_BUF_SZ * TX_RING_SIZE,
- &rp->tx_bufs_dma);
+ rp->tx_bufs = dma_alloc_coherent(hwdev,
+ PKT_BUF_SZ * TX_RING_SIZE,
+ &rp->tx_bufs_dma,
+ GFP_ATOMIC);
if (rp->tx_bufs == NULL) {
- pci_free_consistent(rp->pdev,
- RX_RING_SIZE * sizeof(struct rx_desc) +
- TX_RING_SIZE * sizeof(struct tx_desc),
- ring, ring_dma);
+ dma_free_coherent(hwdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ ring, ring_dma);
return -ENOMEM;
}
}
@@ -1128,16 +1195,17 @@ static int alloc_ring(struct net_device* dev)
static void free_ring(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
- pci_free_consistent(rp->pdev,
- RX_RING_SIZE * sizeof(struct rx_desc) +
- TX_RING_SIZE * sizeof(struct tx_desc),
- rp->rx_ring, rp->rx_ring_dma);
+ dma_free_coherent(hwdev,
+ RX_RING_SIZE * sizeof(struct rx_desc) +
+ TX_RING_SIZE * sizeof(struct tx_desc),
+ rp->rx_ring, rp->rx_ring_dma);
rp->tx_ring = NULL;
if (rp->tx_bufs)
- pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
- rp->tx_bufs, rp->tx_bufs_dma);
+ dma_free_coherent(hwdev, PKT_BUF_SZ * TX_RING_SIZE,
+ rp->tx_bufs, rp->tx_bufs_dma);
rp->tx_bufs = NULL;
@@ -1146,6 +1214,7 @@ static void free_ring(struct net_device* dev)
static void alloc_rbufs(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
dma_addr_t next;
int i;
@@ -1174,9 +1243,9 @@ static void alloc_rbufs(struct net_device *dev)
break;
rp->rx_skbuff_dma[i] =
- pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[i])) {
+ dma_map_single(hwdev, skb->data, rp->rx_buf_sz,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(hwdev, rp->rx_skbuff_dma[i])) {
rp->rx_skbuff_dma[i] = 0;
dev_kfree_skb(skb);
break;
@@ -1190,6 +1259,7 @@ static void alloc_rbufs(struct net_device *dev)
static void free_rbufs(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int i;
/* Free all the skbuffs in the Rx queue. */
@@ -1197,9 +1267,9 @@ static void free_rbufs(struct net_device* dev)
rp->rx_ring[i].rx_status = 0;
rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (rp->rx_skbuff[i]) {
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->rx_skbuff_dma[i],
- rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
+ rp->rx_buf_sz, DMA_FROM_DEVICE);
dev_kfree_skb(rp->rx_skbuff[i]);
}
rp->rx_skbuff[i] = NULL;
@@ -1230,6 +1300,7 @@ static void alloc_tbufs(struct net_device* dev)
static void free_tbufs(struct net_device* dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int i;
for (i = 0; i < TX_RING_SIZE; i++) {
@@ -1238,10 +1309,10 @@ static void free_tbufs(struct net_device* dev)
rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
if (rp->tx_skbuff[i]) {
if (rp->tx_skbuff_dma[i]) {
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->tx_skbuff_dma[i],
rp->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
dev_kfree_skb(rp->tx_skbuff[i]);
}
@@ -1278,8 +1349,9 @@ static void rhine_set_carrier(struct mii_if_info *mii)
/* autoneg is off: Link is always assumed to be up */
if (!netif_carrier_ok(dev))
netif_carrier_on(dev);
- } else /* Let MMI library update carrier status */
- rhine_check_media(dev, 0);
+ }
+
+ rhine_check_media(dev, 0);
netif_info(rp, link, dev, "force_media %d, carrier %d\n",
mii->force_media, netif_carrier_ok(dev));
@@ -1469,7 +1541,7 @@ static void init_registers(struct net_device *dev)
rhine_set_rx_mode(dev);
- if (rp->pdev->revision >= VT6105M)
+ if (rp->quirks & rqMgmt)
rhine_init_cam_filter(dev);
napi_enable(&rp->napi);
@@ -1581,16 +1653,15 @@ static int rhine_open(struct net_device *dev)
void __iomem *ioaddr = rp->base;
int rc;
- rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
- dev);
+ rc = request_irq(rp->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev);
if (rc)
return rc;
- netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
+ netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->irq);
rc = alloc_ring(dev);
if (rc) {
- free_irq(rp->pdev->irq, dev);
+ free_irq(rp->irq, dev);
return rc;
}
alloc_rbufs(dev);
@@ -1659,6 +1730,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
void __iomem *ioaddr = rp->base;
unsigned entry;
@@ -1695,9 +1767,9 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
rp->tx_bufs));
} else {
rp->tx_skbuff_dma[entry] =
- pci_map_single(rp->pdev, skb->data, skb->len,
- PCI_DMA_TODEVICE);
- if (dma_mapping_error(&rp->pdev->dev, rp->tx_skbuff_dma[entry])) {
+ dma_map_single(hwdev, skb->data, skb->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(hwdev, rp->tx_skbuff_dma[entry])) {
dev_kfree_skb_any(skb);
rp->tx_skbuff_dma[entry] = 0;
dev->stats.tx_dropped++;
@@ -1788,6 +1860,7 @@ static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
static void rhine_tx(struct net_device *dev)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
/* find and cleanup dirty tx descriptors */
@@ -1831,10 +1904,10 @@ static void rhine_tx(struct net_device *dev)
}
/* Free the original skb. */
if (rp->tx_skbuff_dma[entry]) {
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->tx_skbuff_dma[entry],
rp->tx_skbuff[entry]->len,
- PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE);
}
dev_consume_skb_any(rp->tx_skbuff[entry]);
rp->tx_skbuff[entry] = NULL;
@@ -1863,6 +1936,7 @@ static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
static int rhine_rx(struct net_device *dev, int limit)
{
struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
int count;
int entry = rp->cur_rx % RX_RING_SIZE;
@@ -1924,19 +1998,19 @@ static int rhine_rx(struct net_device *dev, int limit)
if (pkt_len < rx_copybreak)
skb = netdev_alloc_skb_ip_align(dev, pkt_len);
if (skb) {
- pci_dma_sync_single_for_cpu(rp->pdev,
- rp->rx_skbuff_dma[entry],
- rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_cpu(hwdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ DMA_FROM_DEVICE);
skb_copy_to_linear_data(skb,
rp->rx_skbuff[entry]->data,
pkt_len);
skb_put(skb, pkt_len);
- pci_dma_sync_single_for_device(rp->pdev,
- rp->rx_skbuff_dma[entry],
- rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ dma_sync_single_for_device(hwdev,
+ rp->rx_skbuff_dma[entry],
+ rp->rx_buf_sz,
+ DMA_FROM_DEVICE);
} else {
skb = rp->rx_skbuff[entry];
if (skb == NULL) {
@@ -1945,10 +2019,10 @@ static int rhine_rx(struct net_device *dev, int limit)
}
rp->rx_skbuff[entry] = NULL;
skb_put(skb, pkt_len);
- pci_unmap_single(rp->pdev,
+ dma_unmap_single(hwdev,
rp->rx_skbuff_dma[entry],
rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
+ DMA_FROM_DEVICE);
}
if (unlikely(desc_length & DescTag))
@@ -1979,10 +2053,11 @@ static int rhine_rx(struct net_device *dev, int limit)
if (skb == NULL)
break; /* Better luck next round. */
rp->rx_skbuff_dma[entry] =
- pci_map_single(rp->pdev, skb->data,
+ dma_map_single(hwdev, skb->data,
rp->rx_buf_sz,
- PCI_DMA_FROMDEVICE);
- if (dma_mapping_error(&rp->pdev->dev, rp->rx_skbuff_dma[entry])) {
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(hwdev,
+ rp->rx_skbuff_dma[entry])) {
dev_kfree_skb(skb);
rp->rx_skbuff_dma[entry] = 0;
break;
@@ -2103,7 +2178,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
/* Too many to match, or accept all multicasts. */
iowrite32(0xffffffff, ioaddr + MulticastFilter0);
iowrite32(0xffffffff, ioaddr + MulticastFilter1);
- } else if (rp->pdev->revision >= VT6105M) {
+ } else if (rp->quirks & rqMgmt) {
int i = 0;
u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
netdev_for_each_mc_addr(ha, dev) {
@@ -2125,7 +2200,7 @@ static void rhine_set_rx_mode(struct net_device *dev)
iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
}
/* enable/disable VLAN receive filtering */
- if (rp->pdev->revision >= VT6105M) {
+ if (rp->quirks & rqMgmt) {
if (dev->flags & IFF_PROMISC)
BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
else
@@ -2136,11 +2211,11 @@ static void rhine_set_rx_mode(struct net_device *dev)
static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
- struct rhine_private *rp = netdev_priv(dev);
+ struct device *hwdev = dev->dev.parent;
strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
strlcpy(info->version, DRV_VERSION, sizeof(info->version));
- strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
+ strlcpy(info->bus_info, dev_name(hwdev), sizeof(info->bus_info));
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
@@ -2277,7 +2352,7 @@ static int rhine_close(struct net_device *dev)
/* Stop the chip's Tx and Rx processes. */
iowrite16(CmdStop, ioaddr + ChipCmd);
- free_irq(rp->pdev->irq, dev);
+ free_irq(rp->irq, dev);
free_rbufs(dev);
free_tbufs(dev);
free_ring(dev);
@@ -2286,7 +2361,7 @@ static int rhine_close(struct net_device *dev)
}
-static void rhine_remove_one(struct pci_dev *pdev)
+static void rhine_remove_one_pci(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
@@ -2300,7 +2375,21 @@ static void rhine_remove_one(struct pci_dev *pdev)
pci_disable_device(pdev);
}
-static void rhine_shutdown (struct pci_dev *pdev)
+static int rhine_remove_one_platform(struct platform_device *pdev)
+{
+ struct net_device *dev = platform_get_drvdata(pdev);
+ struct rhine_private *rp = netdev_priv(dev);
+
+ unregister_netdev(dev);
+
+ iounmap(rp->base);
+
+ free_netdev(dev);
+
+ return 0;
+}
+
+static void rhine_shutdown_pci(struct pci_dev *pdev)
{
struct net_device *dev = pci_get_drvdata(pdev);
struct rhine_private *rp = netdev_priv(dev);
@@ -2354,8 +2443,7 @@ static void rhine_shutdown (struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int rhine_suspend(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rhine_private *rp = netdev_priv(dev);
if (!netif_running(dev))
@@ -2367,23 +2455,21 @@ static int rhine_suspend(struct device *device)
netif_device_detach(dev);
- rhine_shutdown(pdev);
+ if (dev_is_pci(device))
+ rhine_shutdown_pci(to_pci_dev(device));
return 0;
}
static int rhine_resume(struct device *device)
{
- struct pci_dev *pdev = to_pci_dev(device);
- struct net_device *dev = pci_get_drvdata(pdev);
+ struct net_device *dev = dev_get_drvdata(device);
struct rhine_private *rp = netdev_priv(dev);
if (!netif_running(dev))
return 0;
-#ifdef USE_MMIO
enable_mmio(rp->pioaddr, rp->quirks);
-#endif
rhine_power_init(dev);
free_tbufs(dev);
free_rbufs(dev);
@@ -2408,15 +2494,26 @@ static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
#endif /* !CONFIG_PM_SLEEP */
-static struct pci_driver rhine_driver = {
+static struct pci_driver rhine_driver_pci = {
.name = DRV_NAME,
.id_table = rhine_pci_tbl,
- .probe = rhine_init_one,
- .remove = rhine_remove_one,
- .shutdown = rhine_shutdown,
+ .probe = rhine_init_one_pci,
+ .remove = rhine_remove_one_pci,
+ .shutdown = rhine_shutdown_pci,
.driver.pm = RHINE_PM_OPS,
};
+static struct platform_driver rhine_driver_platform = {
+ .probe = rhine_init_one_platform,
+ .remove = rhine_remove_one_platform,
+ .driver = {
+ .name = DRV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = rhine_of_tbl,
+ .pm = RHINE_PM_OPS,
+ }
+};
+
static struct dmi_system_id rhine_dmi_table[] __initdata = {
{
.ident = "EPIA-M",
@@ -2437,6 +2534,8 @@ static struct dmi_system_id rhine_dmi_table[] __initdata = {
static int __init rhine_init(void)
{
+ int ret_pci, ret_platform;
+
/* when a module, this is printed whether or not devices are found in probe */
#ifdef MODULE
pr_info("%s\n", version);
@@ -2449,13 +2548,19 @@ static int __init rhine_init(void)
else if (avoid_D3)
pr_info("avoid_D3 set\n");
- return pci_register_driver(&rhine_driver);
+ ret_pci = pci_register_driver(&rhine_driver_pci);
+ ret_platform = platform_driver_register(&rhine_driver_platform);
+ if ((ret_pci < 0) && (ret_platform < 0))
+ return ret_pci;
+
+ return 0;
}
static void __exit rhine_cleanup(void)
{
- pci_unregister_driver(&rhine_driver);
+ platform_driver_unregister(&rhine_driver_platform);
+ pci_unregister_driver(&rhine_driver_pci);
}
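
The via-rhine rework splits probing into a bus-independent core (rhine_init_one_common()) fronted by thin PCI and platform probes, folds the compile-time USE_MMIO switch into the rqNeedEnMMIO runtime quirk, and converts every pci_map_single()/pci_alloc_consistent() call to the generic DMA API keyed off dev->dev.parent, which works on either bus. Note also that rhine_init() now succeeds if either bus driver registers. A sketch of the bus-agnostic mapping pattern, assuming kernel context and hypothetical names:

    #include <linux/dma-mapping.h>
    #include <linux/netdevice.h>
    #include <linux/skbuff.h>

    static dma_addr_t demo_map_tx(struct net_device *dev, struct sk_buff *skb)
    {
        struct device *hwdev = dev->dev.parent; /* PCI or platform device */
        dma_addr_t mapping;

        mapping = dma_map_single(hwdev, skb->data, skb->len, DMA_TO_DEVICE);
        if (dma_mapping_error(hwdev, mapping))
            return 0;   /* caller drops the packet, as rhine_start_tx() does */

        return mapping;
    }
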
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index fa193c4..4ef818a 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -75,7 +75,7 @@ int temac_indirect_busywait(struct temac_local *lp)
long end = jiffies + 2;
while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
- if (end - jiffies <= 0) {
+ if (time_before_eq(end, jiffies)) {
WARN_ON(1);
return -ETIMEDOUT;
}
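
This and the two xilinx hunks that follow fix the same timeout bug: with unsigned jiffies arithmetic, "end - jiffies <= 0" is true only at exact equality, so a busy-wait that missed the deadline tick could spin for a near-full wraparound. time_before_eq() compares the signed difference instead, which is wraparound-safe. A standalone reduction (macro copied from the <linux/jiffies.h> semantics):

    #include <stdio.h>

    #define time_before_eq(a, b)    ((long)((b) - (a)) >= 0)

    int main(void)
    {
        unsigned long end = 100, now = 101;      /* deadline already passed */

        printf("buggy: %d\n", (end - now) <= 0);         /* 0: huge unsigned */
        printf("fixed: %d\n", time_before_eq(end, now)); /* 1: timed out */
        return 0;
    }
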
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 64b4639..d4abf47 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -22,7 +22,7 @@ int axienet_mdio_wait_until_ready(struct axienet_local *lp)
long end = jiffies + 2;
while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
XAE_MDIO_MCR_READY_MASK)) {
- if (end - jiffies <= 0) {
+ if (time_before_eq(end, jiffies)) {
WARN_ON(1);
return -ETIMEDOUT;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 0d87c67..8c4aed3 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -702,7 +702,7 @@ static int xemaclite_mdio_wait(struct net_local *lp)
*/
while (__raw_readl(lp->base_addr + XEL_MDIOCTRL_OFFSET) &
XEL_MDIOCTRL_MDIOSTS_MASK) {
- if (end - jiffies <= 0) {
+ if (time_before_eq(end, jiffies)) {
WARN_ON(1);
return -ETIMEDOUT;
}