-rw-r--r--Documentation/ABI/testing/sysfs-bus-mdio20
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt6
-rw-r--r--Documentation/devicetree/bindings/net/arc_emac.txt11
-rw-r--r--Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt121
-rw-r--r--Documentation/devicetree/bindings/net/can/sja1000.txt4
-rw-r--r--Documentation/devicetree/bindings/net/cavium-mix.txt7
-rw-r--r--Documentation/devicetree/bindings/net/cavium-pip.txt7
-rw-r--r--Documentation/devicetree/bindings/net/cdns-emac.txt6
-rw-r--r--Documentation/devicetree/bindings/net/cpsw.txt5
-rw-r--r--Documentation/devicetree/bindings/net/davicom-dm9000.txt2
-rw-r--r--Documentation/devicetree/bindings/net/davinci_emac.txt3
-rw-r--r--Documentation/devicetree/bindings/net/ethernet.txt25
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fec.txt5
-rw-r--r--Documentation/devicetree/bindings/net/fsl-tsec-phy.txt13
-rw-r--r--Documentation/devicetree/bindings/net/lpc-eth.txt5
-rw-r--r--Documentation/devicetree/bindings/net/macb.txt6
-rw-r--r--Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt6
-rw-r--r--Documentation/devicetree/bindings/net/marvell-orion-net.txt4
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ks8851.txt3
-rw-r--r--Documentation/devicetree/bindings/net/micrel.txt18
-rw-r--r--Documentation/devicetree/bindings/net/sh_eth.txt55
-rw-r--r--Documentation/devicetree/bindings/net/smsc-lan91c111.txt3
-rw-r--r--Documentation/devicetree/bindings/net/smsc911x.txt5
-rw-r--r--Documentation/devicetree/bindings/net/stmmac.txt7
-rw-r--r--Documentation/networking/bonding.txt96
-rw-r--r--Documentation/networking/gianfar.txt30
-rw-r--r--Documentation/networking/phy.txt9
-rw-r--r--Documentation/networking/pktgen.txt24
-rw-r--r--Documentation/networking/tcp.txt2
-rw-r--r--MAINTAINERS14
-rw-r--r--drivers/atm/ambassador.c2
-rw-r--r--drivers/atm/firestream.c4
-rw-r--r--drivers/atm/nicstar.c24
-rw-r--r--drivers/atm/solos-pci.c2
-rw-r--r--drivers/connector/connector.c1
-rw-r--r--drivers/isdn/divert/divert_procfs.c7
-rw-r--r--drivers/isdn/hisax/elsa.c9
-rw-r--r--drivers/isdn/hisax/elsa_ser.c3
-rw-r--r--drivers/isdn/hysdn/hysdn_proclog.c7
-rw-r--r--drivers/isdn/i4l/isdn_common.c13
-rw-r--r--drivers/isdn/pcbit/drv.c6
-rw-r--r--drivers/net/bonding/bond_3ad.c38
-rw-r--r--drivers/net/bonding/bond_3ad.h175
-rw-r--r--drivers/net/bonding/bond_alb.c134
-rw-r--r--drivers/net/bonding/bond_debugfs.c10
-rw-r--r--drivers/net/bonding/bond_main.c330
-rw-r--r--drivers/net/bonding/bond_netlink.c6
-rw-r--r--drivers/net/bonding/bond_options.c124
-rw-r--r--drivers/net/bonding/bond_procfs.c12
-rw-r--r--drivers/net/bonding/bond_sysfs.c8
-rw-r--r--drivers/net/bonding/bonding.h26
-rw-r--r--drivers/net/caif/caif_serial.c1
-rw-r--r--drivers/net/caif/caif_spi.c1
-rw-r--r--drivers/net/can/at91_can.c9
-rw-r--r--drivers/net/can/dev.c24
-rw-r--r--drivers/net/can/sja1000/Kconfig13
-rw-r--r--drivers/net/can/sja1000/Makefile1
-rw-r--r--drivers/net/can/sja1000/sja1000.c3
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c220
-rw-r--r--drivers/net/can/sja1000/sja1000_platform.c194
-rw-r--r--drivers/net/dummy.c8
-rw-r--r--drivers/net/ethernet/3com/3c589_cs.c1127
-rw-r--r--drivers/net/ethernet/amd/pcnet32.c124
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig11
-rw-r--r--drivers/net/ethernet/broadcom/Makefile1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h24
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c153
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h48
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c235
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c155
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h21
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c289
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c127
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h22
-rw-r--r--drivers/net/ethernet/broadcom/genet/Makefile2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c2578
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h629
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c464
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c6
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c23
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c26
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c123
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c38
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c46
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c35
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c6
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h17
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c22
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h38
-rw-r--r--drivers/net/ethernet/emulex/benet/be_ethtool.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h6
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c234
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_roce.h2
-rw-r--r--drivers/net/ethernet/freescale/Makefile3
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c1041
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h62
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c165
-rw-r--r--drivers/net/ethernet/freescale/gianfar_sysfs.c340
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c11
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_adminq.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c102
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c33
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h1
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h2
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_txrx.c8
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40e_type.h4
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf.h47
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c13
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_main.c242
-rw-r--r--drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c27
-rw-r--r--drivers/net/ethernet/intel/igb/Makefile5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c9
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.h7
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c25
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h14
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mac.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_mbx.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.c5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_nvm.h5
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c76
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.h6
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h5
-rw-r--r--drivers/net/ethernet/intel/igb/igb.h7
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ethtool.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_hwmon.c5
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c16
-rw-r--r--drivers/net/ethernet/intel/igb/igb_ptp.c11
-rw-r--r--drivers/net/ethernet/intel/igbvf/netdev.c6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe.h6
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c189
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c188
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.h11
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c9
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c16
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c87
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c44
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h3
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_type.h22
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c37
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c45
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_port.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c25
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_selftest.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c38
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c24
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c16
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c40
-rw-r--r--drivers/net/ethernet/neterion/s2io.c5
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c15
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c41
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h77
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c7
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c17
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c22
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c29
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c233
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c102
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c16
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c73
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h3
-rw-r--r--drivers/net/ethernet/sfc/ef10.c4
-rw-r--r--drivers/net/ethernet/sfc/ef10_regs.h61
-rw-r--r--drivers/net/ethernet/sfc/efx.c26
-rw-r--r--drivers/net/ethernet/sfc/efx.h2
-rw-r--r--drivers/net/ethernet/sfc/ethtool.c21
-rw-r--r--drivers/net/ethernet/sfc/falcon.c4
-rw-r--r--drivers/net/ethernet/sfc/farch.c2
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h1
-rw-r--r--drivers/net/ethernet/sfc/nic.c1
-rw-r--r--drivers/net/ethernet/sfc/ptp.c92
-rw-r--r--drivers/net/ethernet/sfc/tx.c21
-rw-r--r--drivers/net/ethernet/sun/niu.c11
-rw-r--r--drivers/net/ethernet/ti/cpsw.c10
-rw-r--r--drivers/net/ethernet/wiznet/w5100.c9
-rw-r--r--drivers/net/ethernet/wiznet/w5300.c9
-rw-r--r--drivers/net/hyperv/hyperv_net.h61
-rw-r--r--drivers/net/hyperv/netvsc.c87
-rw-r--r--drivers/net/hyperv/netvsc_drv.c4
-rw-r--r--drivers/net/hyperv/rndis_filter.c41
-rw-r--r--drivers/net/ieee802154/Kconfig34
-rw-r--r--drivers/net/ieee802154/at86rf230.c428
-rw-r--r--drivers/net/loopback.c12
-rw-r--r--drivers/net/macvlan.c9
-rw-r--r--drivers/net/nlmon.c11
-rw-r--r--drivers/net/phy/Kconfig6
-rw-r--r--drivers/net/phy/Makefile1
-rw-r--r--drivers/net/phy/bcm7xxx.c343
-rw-r--r--drivers/net/phy/broadcom.c52
-rw-r--r--drivers/net/phy/mdio_bus.c20
-rw-r--r--drivers/net/phy/micrel.c49
-rw-r--r--drivers/net/phy/phy.c51
-rw-r--r--drivers/net/phy/phy_device.c57
-rw-r--r--drivers/net/team/team.c8
-rw-r--r--drivers/net/usb/r8152.c711
-rw-r--r--drivers/net/veth.c19
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c110
-rw-r--r--drivers/net/vxlan.c13
-rw-r--r--drivers/net/wireless/ath/ath.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/txrx.c4
-rw-r--r--drivers/net/wireless/ath/ath6kl/cfg80211.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig12
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile1
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c235
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c244
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h19
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/dfs_debug.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c176
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c36
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h12
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c1495
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h248
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c259
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c2
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.c10
-rw-r--r--drivers/net/wireless/ath/wcn36xx/dxe.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/hal.h4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/main.c72
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c64
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.h5
-rw-r--r--drivers/net/wireless/ath/wcn36xx/txrx.c7
-rw-r--r--drivers/net/wireless/ath/wcn36xx/wcn36xx.h10
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c32
-rw-r--r--drivers/net/wireless/b43/main.h35
-rw-r--r--drivers/net/wireless/b43/xmit.c12
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/Makefile4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c8
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.c1029
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/chip.h91
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c536
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.c5
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/fwil.h2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c972
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h231
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h89
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c83
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h3
-rw-r--r--drivers/net/wireless/iwlegacy/3945-rs.c3
-rw-r--r--drivers/net/wireless/iwlegacy/4965-rs.c3
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig14
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.c23
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c132
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h7
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.c15
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-op-mode.h24
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h46
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans.h38
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/Makefile2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c138
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/constants.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c112
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c285
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h17
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-power.h33
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h31
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api.h128
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw.c67
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c76
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c476
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h168
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/nvm.c34
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/ops.c257
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power.c385
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/power_legacy.c319
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/quota.c119
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.c29
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rs.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/rx.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c15
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c203
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.h62
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tt.c7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c23
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c99
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c82
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c3
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c267
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c5
-rw-r--r--drivers/net/wireless/libertas/cfg.c3
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c90
-rw-r--r--drivers/net/wireless/mac80211_hwsim.h2
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c192
-rw-r--r--drivers/net/wireless/mwifiex/11ac.h2
-rw-r--r--drivers/net/wireless/mwifiex/11n.c51
-rw-r--r--drivers/net/wireless/mwifiex/11n.h44
-rw-r--r--drivers/net/wireless/mwifiex/11n_rxreorder.c27
-rw-r--r--drivers/net/wireless/mwifiex/Makefile1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c184
-rw-r--r--drivers/net/wireless/mwifiex/cfp.c203
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c54
-rw-r--r--drivers/net/wireless/mwifiex/decl.h23
-rw-r--r--drivers/net/wireless/mwifiex/fw.h181
-rw-r--r--drivers/net/wireless/mwifiex/init.c5
-rw-r--r--drivers/net/wireless/mwifiex/ioctl.h22
-rw-r--r--drivers/net/wireless/mwifiex/join.c14
-rw-r--r--drivers/net/wireless/mwifiex/main.h90
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c138
-rw-r--r--drivers/net/wireless/mwifiex/scan.c598
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmd.c373
-rw-r--r--drivers/net/wireless/mwifiex/sta_cmdresp.c112
-rw-r--r--drivers/net/wireless/mwifiex/sta_event.c12
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c40
-rw-r--r--drivers/net/wireless/mwifiex/sta_rx.c13
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c3
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c1044
-rw-r--r--drivers/net/wireless/mwifiex/uap_event.c118
-rw-r--r--drivers/net/wireless/mwifiex/util.c114
-rw-r--r--drivers/net/wireless/mwifiex/util.h20
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c96
-rw-r--r--drivers/net/wireless/mwifiex/wmm.h18
-rw-r--r--drivers/net/wireless/rndis_wlan.c4
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c10
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c8
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c14
-rw-r--r--drivers/net/wireless/rtl818x/rtl818x.h10
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c3
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8188ee/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192se/trx.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723ae/trx.c2
-rw-r--r--drivers/net/wireless/ti/wl12xx/main.c67
-rw-r--r--drivers/net/wireless/ti/wl12xx/wl12xx.h53
-rw-r--r--drivers/net/wireless/ti/wl18xx/main.c85
-rw-r--r--drivers/net/wireless/ti/wl18xx/tx.c4
-rw-r--r--drivers/net/wireless/ti/wl18xx/wl18xx.h62
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c7
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h6
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.c24
-rw-r--r--drivers/net/wireless/ti/wlcore/cmd.h9
-rw-r--r--drivers/net/wireless/ti/wlcore/event.c4
-rw-r--r--drivers/net/wireless/ti/wlcore/hw_ops.h9
-rw-r--r--drivers/net/wireless/ti/wlcore/init.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/io.h8
-rw-r--r--drivers/net/wireless/ti/wlcore/main.c192
-rw-r--r--drivers/net/wireless/ti/wlcore/ps.c6
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c19
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.h2
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.c45
-rw-r--r--drivers/net/wireless/ti/wlcore/tx.h1
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore.h27
-rw-r--r--drivers/net/wireless/ti/wlcore/wlcore_i.h86
-rw-r--r--drivers/net/xen-netfront.c8
-rw-r--r--drivers/of/of_net.c26
-rw-r--r--drivers/staging/rtl8821ae/rc.c1
-rw-r--r--include/linux/brcmphy.h60
-rw-r--r--include/linux/can/dev.h2
-rw-r--r--include/linux/ethtool.h3
-rw-r--r--include/linux/ieee80211.h199
-rw-r--r--include/linux/mlx4/driver.h12
-rw-r--r--include/linux/mlx4/qp.h11
-rw-r--r--include/linux/mpls.h6
-rw-r--r--include/linux/netdevice.h109
-rw-r--r--include/linux/nl802154.h12
-rw-r--r--include/linux/phy.h56
-rw-r--r--include/linux/skbuff.h61
-rw-r--r--include/linux/tcp.h8
-rw-r--r--include/net/act_api.h22
-rw-r--r--include/net/addrconf.h6
-rw-r--r--include/net/cfg80211.h43
-rw-r--r--include/net/flow.h5
-rw-r--r--include/net/flowcache.h25
-rw-r--r--include/net/ieee80211_radiotap.h4
-rw-r--r--include/net/ieee802154_netdev.h7
-rw-r--r--include/net/ip.h12
-rw-r--r--include/net/ip6_route.h9
-rw-r--r--include/net/mac80211.h52
-rw-r--r--include/net/mac802154.h35
-rw-r--r--include/net/net_namespace.h4
-rw-r--r--include/net/netns/ieee802154_6lowpan.h22
-rw-r--r--include/net/netns/xfrm.h12
-rw-r--r--include/net/rtnetlink.h2
-rw-r--r--include/net/tc_act/tc_csum.h4
-rw-r--r--include/net/tc_act/tc_defact.h4
-rw-r--r--include/net/tc_act/tc_gact.h4
-rw-r--r--include/net/tc_act/tc_ipt.h4
-rw-r--r--include/net/tc_act/tc_mirred.h4
-rw-r--r--include/net/tc_act/tc_nat.h4
-rw-r--r--include/net/tc_act/tc_pedit.h4
-rw-r--r--include/net/tc_act/tc_skbedit.h4
-rw-r--r--include/net/tcp.h13
-rw-r--r--include/net/wpan-phy.h19
-rw-r--r--include/net/xfrm.h108
-rw-r--r--include/uapi/linux/can.h32
-rw-r--r--include/uapi/linux/ethtool.h439
-rw-r--r--include/uapi/linux/if.h134
-rw-r--r--include/uapi/linux/if_ether.h5
-rw-r--r--include/uapi/linux/in.h4
-rw-r--r--include/uapi/linux/in6.h4
-rw-r--r--include/uapi/linux/mpls.h34
-rw-r--r--include/uapi/linux/nl80211.h52
-rw-r--r--include/uapi/linux/pfkeyv2.h15
-rw-r--r--include/uapi/linux/snmp.h6
-rw-r--r--include/uapi/linux/tcp.h3
-rw-r--r--include/uapi/linux/tcp_metrics.h7
-rw-r--r--include/uapi/linux/xfrm.h10
-rw-r--r--net/8021q/vlan_dev.c11
-rw-r--r--net/appletalk/aarp.c4
-rw-r--r--net/appletalk/ddp.c100
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/bridge/br_device.c11
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_netfilter.c2
-rw-r--r--net/bridge/br_private.h4
-rw-r--r--net/bridge/netfilter/ebt_among.c2
-rw-r--r--net/bridge/netfilter/ebt_dnat.c2
-rw-r--r--net/bridge/netfilter/ebt_redirect.c6
-rw-r--r--net/bridge/netfilter/ebt_snat.c2
-rw-r--r--net/ceph/osd_client.c2
-rw-r--r--net/core/flow.c127
-rw-r--r--net/core/neighbour.c9
-rw-r--r--net/core/net-sysfs.c11
-rw-r--r--net/core/pktgen.c32
-rw-r--r--net/core/request_sock.c1
-rw-r--r--net/core/rtnetlink.c107
-rw-r--r--net/core/skbuff.c26
-rw-r--r--net/hsr/hsr_device.c10
-rw-r--r--net/hsr/hsr_framereg.c20
-rw-r--r--net/hsr/hsr_main.c4
-rw-r--r--net/ieee802154/6lowpan.h113
-rw-r--r--net/ieee802154/6lowpan_rtnl.c (renamed from net/ieee802154/6lowpan.c)327
-rw-r--r--net/ieee802154/Kconfig2
-rw-r--r--net/ieee802154/Makefile1
-rw-r--r--net/ieee802154/ieee802154.h1
-rw-r--r--net/ieee802154/netlink.c1
-rw-r--r--net/ieee802154/nl-phy.c200
-rw-r--r--net/ieee802154/nl_policy.c10
-rw-r--r--net/ieee802154/reassembly.c564
-rw-r--r--net/ieee802154/reassembly.h66
-rw-r--r--net/ieee802154/wpan-class.c10
-rw-r--r--net/ipv4/Makefile2
-rw-r--r--net/ipv4/ah4.c78
-rw-r--r--net/ipv4/esp4.c26
-rw-r--r--net/ipv4/ip_forward.c7
-rw-r--r--net/ipv4/ip_output.c12
-rw-r--r--net/ipv4/ip_sockglue.c21
-rw-r--r--net/ipv4/ip_tunnel.c31
-rw-r--r--net/ipv4/ip_vti.c310
-rw-r--r--net/ipv4/ipcomp.c26
-rw-r--r--net/ipv4/ping.c2
-rw-r--r--net/ipv4/proc.c6
-rw-r--r--net/ipv4/raw.c2
-rw-r--r--net/ipv4/route.c1
-rw-r--r--net/ipv4/tcp.c13
-rw-r--r--net/ipv4/tcp_cong.c10
-rw-r--r--net/ipv4/tcp_cubic.c4
-rw-r--r--net/ipv4/tcp_highspeed.c1
-rw-r--r--net/ipv4/tcp_hybla.c13
-rw-r--r--net/ipv4/tcp_illinois.c2
-rw-r--r--net/ipv4/tcp_input.c186
-rw-r--r--net/ipv4/tcp_ipv4.c6
-rw-r--r--net/ipv4/tcp_lp.c2
-rw-r--r--net/ipv4/tcp_metrics.c83
-rw-r--r--net/ipv4/tcp_minisocks.c4
-rw-r--r--net/ipv4/tcp_output.c53
-rw-r--r--net/ipv4/tcp_probe.c2
-rw-r--r--net/ipv4/tcp_scalable.c1
-rw-r--r--net/ipv4/tcp_timer.c3
-rw-r--r--net/ipv4/tcp_vegas.c2
-rw-r--r--net/ipv4/tcp_veno.c1
-rw-r--r--net/ipv4/tcp_westwood.c1
-rw-r--r--net/ipv4/tcp_yeah.c2
-rw-r--r--net/ipv4/udp.c3
-rw-r--r--net/ipv4/xfrm4_input.c9
-rw-r--r--net/ipv4/xfrm4_mode_tunnel.c68
-rw-r--r--net/ipv4/xfrm4_protocol.c275
-rw-r--r--net/ipv6/addrlabel.c59
-rw-r--r--net/ipv6/ah6.c56
-rw-r--r--net/ipv6/ip6_checksum.c4
-rw-r--r--net/ipv6/ip6_flowlabel.c6
-rw-r--r--net/ipv6/ip6_gre.c9
-rw-r--r--net/ipv6/ip6_output.c9
-rw-r--r--net/ipv6/ip6_tunnel.c9
-rw-r--r--net/ipv6/ip6_vti.c8
-rw-r--r--net/ipv6/ipv6_sockglue.c2
-rw-r--r--net/ipv6/sit.c18
-rw-r--r--net/ipv6/tcp_ipv6.c4
-rw-r--r--net/ipx/af_ipx.c28
-rw-r--r--net/key/af_key.c39
-rw-r--r--net/l2tp/l2tp_core.c2
-rw-r--r--net/l2tp/l2tp_ppp.c3
-rw-r--r--net/mac80211/agg-tx.c2
-rw-r--r--net/mac80211/cfg.c198
-rw-r--r--net/mac80211/cfg.h2
-rw-r--r--net/mac80211/chan.c2
-rw-r--r--net/mac80211/debugfs_sta.c2
-rw-r--r--net/mac80211/ht.c2
-rw-r--r--net/mac80211/ibss.c28
-rw-r--r--net/mac80211/ieee80211_i.h14
-rw-r--r--net/mac80211/iface.c2
-rw-r--r--net/mac80211/main.c9
-rw-r--r--net/mac80211/mesh.c96
-rw-r--r--net/mac80211/mlme.c41
-rw-r--r--net/mac80211/rate.c46
-rw-r--r--net/mac80211/rate.h2
-rw-r--r--net/mac80211/rc80211_minstrel.c2
-rw-r--r--net/mac80211/rc80211_minstrel.h2
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c7
-rw-r--r--net/mac80211/rc80211_pid_algo.c2
-rw-r--r--net/mac80211/rx.c101
-rw-r--r--net/mac80211/sta_info.h2
-rw-r--r--net/mac80211/status.c3
-rw-r--r--net/mac80211/tx.c33
-rw-r--r--net/mac80211/util.c42
-rw-r--r--net/mac80211/vht.c26
-rw-r--r--net/mac80211/wpa.c9
-rw-r--r--net/mac802154/ieee802154_dev.c67
-rw-r--r--net/mac802154/mib.c4
-rw-r--r--net/mac802154/rx.c1
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/openvswitch/datapath.c20
-rw-r--r--net/openvswitch/datapath.h2
-rw-r--r--net/openvswitch/vport.c10
-rw-r--r--net/packet/af_packet.c16
-rw-r--r--net/rfkill/core.c9
-rw-r--r--net/sched/act_api.c142
-rw-r--r--net/sched/act_csum.c31
-rw-r--r--net/sched/act_gact.c34
-rw-r--r--net/sched/act_ipt.c68
-rw-r--r--net/sched/act_mirred.c56
-rw-r--r--net/sched/act_nat.c33
-rw-r--r--net/sched/act_pedit.c45
-rw-r--r--net/sched/act_police.c22
-rw-r--r--net/sched/act_simple.c64
-rw-r--r--net/sched/act_skbedit.c36
-rw-r--r--net/sched/sch_netem.c79
-rw-r--r--net/sched/sch_tbf.c23
-rw-r--r--net/sctp/transport.c1
-rw-r--r--net/socket.c13
-rw-r--r--net/tipc/addr.h2
-rw-r--r--net/tipc/bcast.c28
-rw-r--r--net/tipc/bcast.h4
-rw-r--r--net/tipc/bearer.c42
-rw-r--r--net/tipc/bearer.h7
-rw-r--r--net/tipc/core.c2
-rw-r--r--net/tipc/discover.c4
-rw-r--r--net/tipc/discover.h2
-rw-r--r--net/tipc/link.c608
-rw-r--r--net/tipc/link.h57
-rw-r--r--net/tipc/name_distr.c8
-rw-r--r--net/tipc/name_distr.h2
-rw-r--r--net/tipc/net.c10
-rw-r--r--net/tipc/node.c14
-rw-r--r--net/tipc/port.c97
-rw-r--r--net/tipc/port.h26
-rw-r--r--net/tipc/socket.c188
-rw-r--r--net/wireless/ap.c3
-rw-r--r--net/wireless/chan.c25
-rw-r--r--net/wireless/core.c2
-rw-r--r--net/wireless/core.h7
-rw-r--r--net/wireless/ibss.c19
-rw-r--r--net/wireless/mesh.c6
-rw-r--r--net/wireless/mlme.c2
-rw-r--r--net/wireless/nl80211.c229
-rw-r--r--net/wireless/nl80211.h2
-rw-r--r--net/wireless/reg.c188
-rw-r--r--net/wireless/reg.h2
-rw-r--r--net/wireless/trace.h23
-rw-r--r--net/wireless/util.c21
-rw-r--r--net/xfrm/xfrm_input.c22
-rw-r--r--net/xfrm/xfrm_policy.c35
-rw-r--r--net/xfrm/xfrm_state.c72
-rw-r--r--net/xfrm/xfrm_user.c37
-rw-r--r--security/selinux/include/xfrm.h5
-rw-r--r--tools/net/bpf_dbg.c119
614 files changed, 24538 insertions, 13598 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-mdio b/Documentation/ABI/testing/sysfs-bus-mdio
index 6349749..491baaf 100644
--- a/Documentation/ABI/testing/sysfs-bus-mdio
+++ b/Documentation/ABI/testing/sysfs-bus-mdio
@@ -7,3 +7,23 @@ Description:
by the device during bus enumeration, encoded in hexadecimal.
This ID is used to match the device with the appropriate
driver.
+
+What: /sys/bus/mdio_bus/devices/.../phy_interface
+Date: February 2014
+KernelVersion: 3.15
+Contact: netdev@vger.kernel.org
+Description:
+ This attribute contains the PHY interface as configured by the
+ Ethernet driver during bus enumeration, encoded as a string.
+ This interface mode is used to configure the Ethernet MAC with the
+ appropriate mode for its data lines to the PHY hardware.
+
+What: /sys/bus/mdio_bus/devices/.../phy_has_fixups
+Date: February 2014
+KernelVersion: 3.15
+Contact: netdev@vger.kernel.org
+Description:
+ This attribute indicates whether a given PHY device has had
+ any "fixup" workaround run on it, encoded as a boolean. This
+ information is provided to help troubleshoot PHY
+ configurations.
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
index 863d5b81..10640b1 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
@@ -5,13 +5,9 @@ Required properties:
"allwinner,sun4i-emac")
- reg: address and length of the register set for the device.
- interrupts: interrupt for the device
-- phy: A phandle to a phy node defining the PHY address (as the reg
- property, a single integer).
+- phy: see ethernet.txt file in the same directory.
- clocks: A phandle to the reference clock for this device
-Optional properties:
-- (local-)mac-address: mac address to be used by this driver
-
Example:
emac: ethernet@01c0b000 {
diff --git a/Documentation/devicetree/bindings/net/arc_emac.txt b/Documentation/devicetree/bindings/net/arc_emac.txt
index bcbc3f0..7fbb027 100644
--- a/Documentation/devicetree/bindings/net/arc_emac.txt
+++ b/Documentation/devicetree/bindings/net/arc_emac.txt
@@ -6,19 +6,12 @@ Required properties:
- interrupts: Should contain the EMAC interrupts
- clock-frequency: CPU frequency. It is needed to calculate and set polling
period of EMAC.
-- max-speed: Maximum supported data-rate in Mbit/s. In some HW configurations
-bandwidth of external memory controller might be a limiting factor. That's why
-it's required to specify which data-rate is supported on current SoC or FPGA.
-For example if only 10 Mbit/s is supported (10BASE-T) set "10". If 100 Mbit/s is
-supported (100BASE-TX) set "100".
-- phy: PHY device attached to the EMAC via MDIO bus
+- max-speed: see ethernet.txt file in the same directory.
+- phy: see ethernet.txt file in the same directory.
Child nodes of the driver are the individual PHY devices connected to the
MDIO bus. They must have a "reg" property given the PHY address on the MDIO bus.
-Optional properties:
-- mac-address: 6 bytes, mac address
-
Examples:
ethernet@c0fc2000 {
diff --git a/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt b/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt
new file mode 100644
index 0000000..f2febb9
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/broadcom-bcmgenet.txt
@@ -0,0 +1,121 @@
+* Broadcom BCM7xxx Ethernet Controller (GENET)
+
+Required properties:
+- compatible: should contain one of "brcm,genet-v1", "brcm,genet-v2",
+ "brcm,genet-v3", "brcm,genet-v4".
+- reg: address and length of the register set for the device
+- interrupts: must be two cells, the first cell is the general purpose
+ interrupt line, while the second cell is the interrupt for the ring
+ RX and TX queues operating in ring mode
+- phy-mode: see ethernet.txt file in the same directory
+- #address-cells: should be 1
+- #size-cells: should be 1
+
+Optional properties:
+- clocks: When provided, must be two phandles to the functional clocks nodes
+ of the GENET block. The first phandle is the main GENET clock used during
+ normal operation, while the second phandle is the Wake-on-LAN clock.
+- clock-names: When provided, names of the functional clock phandles, first
+ name should be "enet" and second should be "enet-wol".
+
+- phy-handle: See ethernet.txt file in the same directory; used to describe
+ configurations where a PHY (internal or external) is used.
+
+- fixed-link: When the GENET interface is connected to a MoCA hardware block,
+ when operating in an RGMII-to-RGMII type of connection, or when the MDIO bus
+ is deliberately disabled, this property should be used to describe the "fixed
+ link". See Documentation/devicetree/bindings/net/fsl-tsec-phy.txt for
+ information on the property specifics.
+
+Required child nodes:
+
+- mdio bus node: this node should always be present regardless of the PHY
+ configuration of the GENET instance.
+
+MDIO bus node required properties:
+
+- compatible: should contain one of "brcm,genet-mdio-v1", "brcm,genet-mdio-v2"
+ "brcm,genet-mdio-v3", "brcm,genet-mdio-v4", the version has to match the
+ parent node compatible property (e.g: brcm,genet-v4 pairs with
+ brcm,genet-mdio-v4)
+- reg: address and length relative to the parent node base register address
+- #address-cells: address cell for MDIO bus addressing, should be 1
+- #size-cells: size of the cells for MDIO bus addressing, should be 0
+
+Ethernet PHY node properties:
+
+See Documentation/devicetree/bindings/net/phy.txt for the list of required and
+optional properties.
+
+Internal Gigabit PHY example:
+
+ethernet@f0b60000 {
+ phy-mode = "internal";
+ phy-handle = <&phy1>;
+ mac-address = [ 00 10 18 36 23 1a ];
+ compatible = "brcm,genet-v4";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0xf0b60000 0xfc4c>;
+ interrupts = <0x0 0x14 0x0>, <0x0 0x15 0x0>;
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v4";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy1: ethernet-phy@1 {
+ max-speed = <1000>;
+ reg = <0x1>;
+ compatible = "brcm,28nm-gphy", "ethernet-phy-ieee802.3-c22";
+ };
+ };
+};
+
+MoCA interface / MAC to MAC example:
+
+ethernet@f0b80000 {
+ phy-mode = "moca";
+ fixed-link = <1 0 1000 0 0>;
+ mac-address = [ 00 10 18 36 24 1a ];
+ compatible = "brcm,genet-v4";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0xf0b80000 0xfc4c>;
+ interrupts = <0x0 0x16 0x0>, <0x0 0x17 0x0>;
+
+ mdio@e14 {
+ compatible = "brcm,genet-mdio-v4";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+ };
+};
+
+
+External MDIO-connected Gigabit PHY/switch:
+
+ethernet@f0ba0000 {
+ phy-mode = "rgmii";
+ phy-handle = <&phy0>;
+ mac-address = [ 00 10 18 36 26 1a ];
+ compatible = "brcm,genet-v4";
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ reg = <0xf0ba0000 0xfc4c>;
+ interrupts = <0x0 0x18 0x0>, <0x0 0x19 0x0>;
+
+ mdio@0e14 {
+ compatible = "brcm,genet-mdio-v4";
+ #address-cells = <0x1>;
+ #size-cells = <0x0>;
+ reg = <0xe14 0x8>;
+
+ phy0: ethernet-phy@0 {
+ max-speed = <1000>;
+ reg = <0x0>;
+ compatible = "brcm,bcm53125", "ethernet-phy-ieee802.3-c22";
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/net/can/sja1000.txt b/Documentation/devicetree/bindings/net/can/sja1000.txt
index f2105a4..b4a6d53 100644
--- a/Documentation/devicetree/bindings/net/can/sja1000.txt
+++ b/Documentation/devicetree/bindings/net/can/sja1000.txt
@@ -12,6 +12,10 @@ Required properties:
Optional properties:
+- reg-io-width : Specify the size (in bytes) of the IO accesses that
+ should be performed on the device. Valid values are 1, 2 or 4.
+ Defaults to 1 (8 bits).
+
- nxp,external-clock-frequency : Frequency of the external oscillator
clock in Hz. Note that the internal clock frequency used by the
SJA1000 is half of that value. If not specified, a default value
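
A minimal sketch of how the new reg-io-width property might be used in a
board's device tree. The base address, register size and interrupt specifier
below are hypothetical placeholders, not taken from any real platform:

	can@1c000 {
		compatible = "nxp,sja1000";
		reg = <0x1c000 0x100>;		/* hypothetical offset and size */
		interrupts = <27 4>;		/* hypothetical interrupt specifier */
		reg-io-width = <2>;		/* perform 16-bit register accesses */
		nxp,external-clock-frequency = <16000000>;
	};
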
diff --git a/Documentation/devicetree/bindings/net/cavium-mix.txt b/Documentation/devicetree/bindings/net/cavium-mix.txt
index 5da628d..8d7c309 100644
--- a/Documentation/devicetree/bindings/net/cavium-mix.txt
+++ b/Documentation/devicetree/bindings/net/cavium-mix.txt
@@ -18,12 +18,7 @@ Properties:
- interrupts: Two interrupt specifiers. The first is the MIX
interrupt routing and the second the routing for the AGL interrupts.
-- mac-address: Optional, the MAC address to assign to the device.
-
-- local-mac-address: Optional, the MAC address to assign to the device
- if mac-address is not specified.
-
-- phy-handle: Optional, a phandle for the PHY device connected to this device.
+- phy-handle: Optional, see ethernet.txt file in the same directory.
Example:
ethernet@1070000100800 {
diff --git a/Documentation/devicetree/bindings/net/cavium-pip.txt b/Documentation/devicetree/bindings/net/cavium-pip.txt
index d4c53ba..7dbd158 100644
--- a/Documentation/devicetree/bindings/net/cavium-pip.txt
+++ b/Documentation/devicetree/bindings/net/cavium-pip.txt
@@ -35,12 +35,7 @@ Properties for PIP port which is a child the PIP interface:
- reg: The port number within the interface group.
-- mac-address: Optional, the MAC address to assign to the device.
-
-- local-mac-address: Optional, the MAC address to assign to the device
- if mac-address is not specified.
-
-- phy-handle: Optional, a phandle for the PHY device connected to this device.
+- phy-handle: Optional, see ethernet.txt file in the same directory.
Example:
diff --git a/Documentation/devicetree/bindings/net/cdns-emac.txt b/Documentation/devicetree/bindings/net/cdns-emac.txt
index 09055c2..abd67c1 100644
--- a/Documentation/devicetree/bindings/net/cdns-emac.txt
+++ b/Documentation/devicetree/bindings/net/cdns-emac.txt
@@ -6,11 +6,7 @@ Required properties:
or the generic form: "cdns,emac".
- reg: Address and length of the register set for the device
- interrupts: Should contain macb interrupt
-- phy-mode: String, operation mode of the PHY interface.
- Supported values are: "mii", "rmii".
-
-Optional properties:
-- local-mac-address: 6 bytes, mac address
+- phy-mode: see ethernet.txt file in the same directory.
Examples:
diff --git a/Documentation/devicetree/bindings/net/cpsw.txt b/Documentation/devicetree/bindings/net/cpsw.txt
index 05d660e..ae2b8b7 100644
--- a/Documentation/devicetree/bindings/net/cpsw.txt
+++ b/Documentation/devicetree/bindings/net/cpsw.txt
@@ -28,9 +28,8 @@ Optional properties:
Slave Properties:
Required properties:
- phy_id : Specifies slave phy id
-- phy-mode : The interface between the SoC and the PHY (a string
- that of_get_phy_mode() can understand)
-- mac-address : Specifies slave MAC address
+- phy-mode : See ethernet.txt file in the same directory
+- mac-address : See ethernet.txt file in the same directory
Optional properties:
- dual_emac_res_vlan : Specifies VID to be used to segregate the ports
diff --git a/Documentation/devicetree/bindings/net/davicom-dm9000.txt b/Documentation/devicetree/bindings/net/davicom-dm9000.txt
index 2d39c99..28767ed 100644
--- a/Documentation/devicetree/bindings/net/davicom-dm9000.txt
+++ b/Documentation/devicetree/bindings/net/davicom-dm9000.txt
@@ -9,8 +9,6 @@ Required properties:
- interrupts : interrupt specifier specific to interrupt controller
Optional properties:
-- local-mac-address : A bytestring of 6 bytes specifying Ethernet MAC address
- to use (from firmware or bootloader)
- davicom,no-eeprom : Configuration EEPROM is not available
- davicom,ext-phy : Use external PHY
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index 6e356d1..0328088 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -17,9 +17,8 @@ Required properties:
Miscellaneous Interrupt>
Optional properties:
-- phy-handle: Contains a phandle to an Ethernet PHY.
+- phy-handle: See ethernet.txt file in the same directory.
If absent, davinci_emac driver defaults to 100/FULL.
-- local-mac-address : 6 bytes, mac address
- ti,davinci-rmii-en: 1 byte, 1 means use RMII
- ti,davinci-no-bd-ram: boolean, does EMAC have BD RAM?
diff --git a/Documentation/devicetree/bindings/net/ethernet.txt b/Documentation/devicetree/bindings/net/ethernet.txt
new file mode 100644
index 0000000..9ecd43d
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/ethernet.txt
@@ -0,0 +1,25 @@
+The following properties are common to the Ethernet controllers:
+
+- local-mac-address: array of 6 bytes, specifies the MAC address that was
+ assigned to the network device;
+- mac-address: array of 6 bytes, specifies the MAC address that was last used by
+ the boot program; should be used in cases where the MAC address assigned to
+ the device by the boot program is different from the "local-mac-address"
+ property;
+- max-speed: number, specifies maximum speed in Mbit/s supported by the device;
+- max-frame-size: number, maximum transfer unit (IEEE defined MTU), rather than
+ the maximum frame size (there's a contradiction in ePAPR).
+- phy-mode: string, operation mode of the PHY interface; supported values are
+ "mii", "gmii", "sgmii", "tbi", "rev-mii", "rmii", "rgmii", "rgmii-id",
+ "rgmii-rxid", "rgmii-txid", "rtbi", "smii", "xgmii"; this is now a de-facto
+ standard property;
+- phy-connection-type: the same as "phy-mode" property but described in ePAPR;
+- phy-handle: phandle, specifies a reference to a node representing a PHY
+ device; this property is described in ePAPR and is therefore preferred;
+- phy: the same as "phy-handle" property, not recommended for new bindings.
+- phy-device: the same as "phy-handle" property, not recommended for new
+ bindings.
+
+Child nodes of the Ethernet controller are typically the individual PHY devices
+connected via the MDIO bus (sometimes the MDIO bus controller is separate).
+They are described in the phy.txt file in this same directory.
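
As an illustration of how these common properties typically appear together,
here is a minimal, hypothetical controller node; the compatible string,
addresses, interrupt number and PHY label are placeholders rather than values
from any real binding:

	ethernet@f0000000 {
		compatible = "vendor,example-ethernet";	/* placeholder compatible */
		reg = <0xf0000000 0x1000>;
		interrupts = <10>;
		phy-mode = "rgmii-id";
		phy-handle = <&ethphy0>;	/* reference to a PHY node */
		local-mac-address = [ 00 11 22 33 44 55 ];
		max-frame-size = <1500>;
	};
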
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index 845ff84..6bc84ad 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -4,12 +4,9 @@ Required properties:
- compatible : Should be "fsl,<soc>-fec"
- reg : Address and length of the register set for the device
- interrupts : Should contain fec interrupt
-- phy-mode : String, operation mode of the PHY interface.
- Supported values are: "mii", "gmii", "sgmii", "tbi", "rmii",
- "rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid", "rtbi", "smii".
+- phy-mode : See ethernet.txt file in the same directory
Optional properties:
-- local-mac-address : 6 bytes, mac address
- phy-reset-gpios : Should specify the gpio for phy reset
- phy-reset-duration : Reset duration in milliseconds. Should present
only if property "phy-reset-gpios" is available. Missing the property
diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
index d2ea460..737cdef 100644
--- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
+++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt
@@ -38,22 +38,17 @@ Properties:
- model : Model of the device. Can be "TSEC", "eTSEC", or "FEC"
- compatible : Should be "gianfar"
- reg : Offset and length of the register set for the device
- - local-mac-address : List of bytes representing the ethernet address of
- this controller
- interrupts : For FEC devices, the first interrupt is the device's
interrupt. For TSEC and eTSEC devices, the first interrupt is
transmit, the second is receive, and the third is error.
- - phy-handle : The phandle for the PHY connected to this ethernet
- controller.
+ - phy-handle : See ethernet.txt file in the same directory.
- fixed-link : <a b c d e> where a is emulated phy id - choose any,
but unique to the all specified fixed-links, b is duplex - 0 half,
1 full, c is link speed - d#10/d#100/d#1000, d is pause - 0 no
pause, 1 pause, e is asym_pause - 0 no asym_pause, 1 asym_pause.
- - phy-connection-type : a string naming the controller/PHY interface type,
- i.e., "mii" (default), "rmii", "gmii", "rgmii", "rgmii-id", "sgmii",
- "tbi", or "rtbi". This property is only really needed if the connection
- is of type "rgmii-id", as all other connection types are detected by
- hardware.
+ - phy-connection-type : See ethernet.txt file in the same directory.
+ This property is only really needed if the connection is of type
+ "rgmii-id", as all other connection types are detected by hardware.
- fsl,magic-packet : If present, indicates that the hardware supports
waking up via magic packet.
- bd-stash : If present, indicates that the hardware supports stashing
diff --git a/Documentation/devicetree/bindings/net/lpc-eth.txt b/Documentation/devicetree/bindings/net/lpc-eth.txt
index 585021a..b92e927 100644
--- a/Documentation/devicetree/bindings/net/lpc-eth.txt
+++ b/Documentation/devicetree/bindings/net/lpc-eth.txt
@@ -6,10 +6,9 @@ Required properties:
- interrupts: Should contain ethernet controller interrupt
Optional properties:
-- phy-mode: String, operation mode of the PHY interface.
- Supported values are: "mii", "rmii" (default)
+- phy-mode: See ethernet.txt file in the same directory. If the property is
+ absent, "rmii" is assumed.
- use-iram: Use LPC32xx internal SRAM (IRAM) for DMA buffering
-- local-mac-address : 6 bytes, mac address
Example:
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 70af2ec..aaa6964 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -8,16 +8,12 @@ Required properties:
the Cadence GEM, or the generic form: "cdns,gem".
- reg: Address and length of the register set for the device
- interrupts: Should contain macb interrupt
-- phy-mode: String, operation mode of the PHY interface.
- Supported values are: "mii", "rmii", "gmii", "rgmii".
+- phy-mode: See ethernet.txt file in the same directory.
- clock-names: Tuple listing input clock names.
Required elements: 'pclk', 'hclk'
Optional elements: 'tx_clk'
- clocks: Phandles to input clocks.
-Optional properties:
-- local-mac-address: 6 bytes, mac address
-
Examples:
macb0: ethernet@fffc4000 {
diff --git a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
index 859a6fa..750d577 100644
--- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
+++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt
@@ -4,10 +4,8 @@ Required properties:
- compatible: should be "marvell,armada-370-neta".
- reg: address and length of the register set for the device.
- interrupts: interrupt for the device
-- phy: A phandle to a phy node defining the PHY address (as the reg
- property, a single integer).
-- phy-mode: The interface between the SoC and the PHY (a string that
- of_get_phy_mode() can understand)
+- phy: See ethernet.txt file in the same directory.
+- phy-mode: See ethernet.txt file in the same directory
- clocks: a pointer to the reference clock for this device.
Example:
diff --git a/Documentation/devicetree/bindings/net/marvell-orion-net.txt b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
index c233b61..bce52b2 100644
--- a/Documentation/devicetree/bindings/net/marvell-orion-net.txt
+++ b/Documentation/devicetree/bindings/net/marvell-orion-net.txt
@@ -36,7 +36,7 @@ Required port properties:
"marvell,kirkwood-eth-port".
- reg: port number relative to ethernet controller, shall be 0, 1, or 2.
- interrupts: port interrupt.
- - local-mac-address: 6 bytes MAC address.
+ - local-mac-address: See ethernet.txt file in the same directory.
Optional port properties:
- marvell,tx-queue-size: size of the transmit ring buffer.
@@ -48,7 +48,7 @@ Optional port properties:
and
- - phy-handle: phandle reference to ethernet PHY.
+ - phy-handle: See ethernet.txt file in the same directory.
or
diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
index 11ace3c..8e20c04 100644
--- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
@@ -4,6 +4,3 @@ Required properties:
- compatible = "micrel,ks8851-ml" of parallel interface
- reg : 2 physical address and size of registers for data and command
- interrupts : interrupt connection
-
-Optional properties:
-- local-mac-address : Ethernet mac address to use
diff --git a/Documentation/devicetree/bindings/net/micrel.txt b/Documentation/devicetree/bindings/net/micrel.txt
new file mode 100644
index 0000000..98a3e61
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/micrel.txt
@@ -0,0 +1,18 @@
+Micrel PHY properties.
+
+These properties cover the base properties of Micrel PHYs.
+
+Optional properties:
+
+ - micrel,led-mode : LED mode value to set for PHYs with configurable LEDs.
+
+ Configure the LED mode with a single value. The list of PHYs and
+ the bits that are currently supported:
+
+ KSZ8001: register 0x1e, bits 15..14
+ KSZ8041: register 0x1e, bits 15..14
+ KSZ8021: register 0x1f, bits 5..4
+ KSZ8031: register 0x1f, bits 5..4
+ KSZ8051: register 0x1f, bits 5..4
+
+ See the respective PHY datasheet for the mode values.
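
A short sketch of how micrel,led-mode might be set on a PHY child node of an
MDIO bus. The bus node, PHY address and mode value are illustrative only; the
valid mode values depend on the specific Micrel PHY and its datasheet:

	mdio {
		#address-cells = <1>;
		#size-cells = <0>;

		ethernet-phy@1 {
			compatible = "ethernet-phy-ieee802.3-c22";
			reg = <1>;
			micrel,led-mode = <1>;	/* illustrative value, PHY-specific */
		};
	};
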
diff --git a/Documentation/devicetree/bindings/net/sh_eth.txt b/Documentation/devicetree/bindings/net/sh_eth.txt
new file mode 100644
index 0000000..e7106b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/sh_eth.txt
@@ -0,0 +1,55 @@
+* Renesas Electronics SH EtherMAC
+
+This file provides information on what the device node for the SH EtherMAC
+interface contains.
+
+Required properties:
+- compatible: "renesas,gether-r8a7740" if the device is a part of R8A7740 SoC.
+ "renesas,ether-r8a7778" if the device is a part of R8A7778 SoC.
+ "renesas,ether-r8a7779" if the device is a part of R8A7779 SoC.
+ "renesas,ether-r8a7790" if the device is a part of R8A7790 SoC.
+ "renesas,ether-r8a7791" if the device is a part of R8A7791 SoC.
+ "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
+- reg: offset and length of (1) the E-DMAC/feLic register block (required),
+ (2) the TSU register block (optional).
+- interrupts: interrupt specifier for the sole interrupt.
+- phy-mode: see ethernet.txt file in the same directory.
+- phy-handle: see ethernet.txt file in the same directory.
+- #address-cells: number of address cells for the MDIO bus, must be equal to 1.
+- #size-cells: number of size cells on the MDIO bus, must be equal to 0.
+- clocks: clock phandle and specifier pair.
+- pinctrl-0: phandle, referring to a default pin configuration node.
+
+Optional properties:
+- interrupt-parent: the phandle for the interrupt controller that services
+ interrupts for this device.
+- pinctrl-names: pin configuration state name ("default").
+- renesas,no-ether-link: boolean, specify when a board does not provide a proper
+ Ether LINK signal.
+- renesas,ether-link-active-low: boolean, specify when the Ether LINK signal is
+ active-low instead of normal active-high.
+
+Example (Lager board):
+
+ ethernet@ee700000 {
+ compatible = "renesas,ether-r8a7790";
+ reg = <0 0xee700000 0 0x400>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 162 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp8_clks R8A7790_CLK_ETHER>;
+ phy-mode = "rmii";
+ phy-handle = <&phy1>;
+ pinctrl-0 = <&ether_pins>;
+ pinctrl-names = "default";
+ renesas,ether-link-active-low;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ phy1: ethernet-phy@1 {
+ reg = <1>;
+ interrupt-parent = <&irqc0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ pinctrl-0 = <&phy1_pins>;
+ pinctrl-names = "default";
+ };
+ };
diff --git a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
index 5a41a865..0f8487b8 100644
--- a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
+++ b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
@@ -6,8 +6,7 @@ Required properties:
- interrupts : interrupt connection
Optional properties:
-- phy-device : phandle to Ethernet phy
-- local-mac-address : Ethernet mac address to use
+- phy-device : see ethernet.txt file in the same directory
- reg-io-width : Mask of sizes (in bytes) of the IO accesses that
are supported on the device. Valid value for SMSC LAN91c111 are
1, 2 or 4. If it's omitted or invalid, the size would be 2 meaning
diff --git a/Documentation/devicetree/bindings/net/smsc911x.txt b/Documentation/devicetree/bindings/net/smsc911x.txt
index adb5b57..3fed3c1 100644
--- a/Documentation/devicetree/bindings/net/smsc911x.txt
+++ b/Documentation/devicetree/bindings/net/smsc911x.txt
@@ -6,9 +6,7 @@ Required properties:
- interrupts : Should contain SMSC LAN interrupt line
- interrupt-parent : Should be the phandle for the interrupt controller
that services interrupts for this device
-- phy-mode : String, operation mode of the PHY interface.
- Supported values are: "mii", "gmii", "sgmii", "tbi", "rmii",
- "rgmii", "rgmii-id", "rgmii-rxid", "rgmii-txid", "rtbi", "smii".
+- phy-mode : See ethernet.txt file in the same directory
Optional properties:
- reg-shift : Specify the quantity to shift the register offsets by
@@ -23,7 +21,6 @@ Optional properties:
external PHY
- smsc,save-mac-address : Indicates that mac address needs to be saved
before resetting the controller
-- local-mac-address : 6 bytes, mac address
Examples:
diff --git a/Documentation/devicetree/bindings/net/stmmac.txt b/Documentation/devicetree/bindings/net/stmmac.txt
index 9d92d42..5748351 100644
--- a/Documentation/devicetree/bindings/net/stmmac.txt
+++ b/Documentation/devicetree/bindings/net/stmmac.txt
@@ -10,8 +10,7 @@ Required properties:
- interrupt-names: Should contain the interrupt names "macirq"
"eth_wake_irq" if this interrupt is supported in the "interrupts"
property
-- phy-mode: String, operation mode of the PHY interface.
- Supported values are: "mii", "rmii", "gmii", "rgmii".
+- phy-mode: See ethernet.txt file in the same directory.
- snps,reset-gpio gpio number for phy reset.
- snps,reset-active-low boolean flag to indicate if phy reset is active low.
- snps,reset-delays-us is triplet of delays
@@ -28,12 +27,10 @@ Required properties:
ignored if force_thresh_dma_mode is set.
Optional properties:
-- mac-address: 6 bytes, mac address
- resets: Should contain a phandle to the STMMAC reset signal, if any
- reset-names: Should contain the reset signal name "stmmaceth", if a
reset phandle is given
-- max-frame-size: Maximum Transfer Unit (IEEE defined MTU), rather
- than the maximum frame size.
+- max-frame-size: See ethernet.txt file in the same directory
Examples:
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index 5cdb229..a383c00 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -270,16 +270,15 @@ arp_ip_target
arp_validate
Specifies whether or not ARP probes and replies should be
- validated in the active-backup mode. This causes the ARP
- monitor to examine the incoming ARP requests and replies, and
- only consider a slave to be up if it is receiving the
- appropriate ARP traffic.
+ validated in any mode that supports arp monitoring, or whether
+ non-ARP traffic should be filtered (disregarded) for link
+ monitoring purposes.
Possible values are:
none or 0
- No validation is performed. This is the default.
+ No validation or filtering is performed.
active or 1
@@ -293,31 +292,68 @@ arp_validate
Validation is performed for all slaves.
- For the active slave, the validation checks ARP replies to
- confirm that they were generated by an arp_ip_target. Since
- backup slaves do not typically receive these replies, the
- validation performed for backup slaves is on the ARP request
- sent out via the active slave. It is possible that some
- switch or network configurations may result in situations
- wherein the backup slaves do not receive the ARP requests; in
- such a situation, validation of backup slaves must be
- disabled.
-
- The validation of ARP requests on backup slaves is mainly
- helping bonding to decide which slaves are more likely to
- work in case of the active slave failure, it doesn't really
- guarantee that the backup slave will work if it's selected
- as the next active slave.
-
- This option is useful in network configurations in which
- multiple bonding hosts are concurrently issuing ARPs to one or
- more targets beyond a common switch. Should the link between
- the switch and target fail (but not the switch itself), the
- probe traffic generated by the multiple bonding instances will
- fool the standard ARP monitor into considering the links as
- still up. Use of the arp_validate option can resolve this, as
- the ARP monitor will only consider ARP requests and replies
- associated with its own instance of bonding.
+ filter or 4
+
+ Filtering is applied to all slaves. No validation is
+ performed.
+
+ filter_active or 5
+
+ Filtering is applied to all slaves, validation is performed
+ only for the active slave.
+
+ filter_backup or 6
+
+ Filtering is applied to all slaves, validation is performed
+ only for backup slaves.
+
+ Validation:
+
+ Enabling validation causes the ARP monitor to examine the incoming
+ ARP requests and replies, and only consider a slave to be up if it
+ is receiving the appropriate ARP traffic.
+
+ For an active slave, the validation checks ARP replies to confirm
+ that they were generated by an arp_ip_target. Since backup slaves
+ do not typically receive these replies, the validation performed
+ for backup slaves is on the broadcast ARP request sent out via the
+ active slave. It is possible that some switch or network
+ configurations may result in situations wherein the backup slaves
+ do not receive the ARP requests; in such a situation, validation
+ of backup slaves must be disabled.
+
+ The validation of ARP requests on backup slaves mainly helps
+ bonding decide which slaves are more likely to work if the active
+ slave fails; it does not guarantee that the backup slave will work
+ if it is selected as the next active slave.
+
+ Validation is useful in network configurations in which multiple
+ bonding hosts are concurrently issuing ARPs to one or more targets
+ beyond a common switch. Should the link between the switch and
+ target fail (but not the switch itself), the probe traffic
+ generated by the multiple bonding instances will fool the standard
+ ARP monitor into considering the links as still up. Use of
+ validation can resolve this, as the ARP monitor will only consider
+ ARP requests and replies associated with its own instance of
+ bonding.
+
+ Filtering:
+
+ Enabling filtering causes the ARP monitor to only use incoming ARP
+ packets for link availability purposes. Arriving packets that are
+ not ARPs are delivered normally, but do not count when determining
+ if a slave is available.
+
+ Filtering operates by only considering the reception of ARP
+ packets (any ARP packet, regardless of source or destination) when
+ determining if a slave has received traffic for link availability
+ purposes.
+
+ Filtering is useful in network configurations in which significant
+ levels of third party broadcast traffic would fool the standard
+ ARP monitor into considering the links as still up. Use of
+ filtering can resolve this, as only ARP traffic is considered for
+ link availability purposes.
This option was added in bonding version 3.1.0.
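
The difference between validation and filtering comes down to which received frames are allowed to refresh the monitor's notion of "this slave has seen traffic". A minimal sketch of that decision, with illustrative names only (this is not the bonding driver's actual receive path):

    #include <linux/if_ether.h>
    #include <linux/jiffies.h>
    #include <linux/skbuff.h>

    /* Illustrative helper, not from the bonding driver: with filtering
     * enabled, only ARP frames refresh the timestamp that the ARP monitor
     * later compares against arp_interval; all other frames are still
     * delivered normally but are ignored for link-availability purposes.
     */
    static void demo_note_rx(struct sk_buff *skb, unsigned long *last_rx,
                             bool filtering)
    {
            if (!filtering || skb->protocol == htons(ETH_P_ARP))
                    *last_rx = jiffies;
    }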
diff --git a/Documentation/networking/gianfar.txt b/Documentation/networking/gianfar.txt
index ad474ea..ba1daea 100644
--- a/Documentation/networking/gianfar.txt
+++ b/Documentation/networking/gianfar.txt
@@ -1,38 +1,8 @@
The Gianfar Ethernet Driver
-Sysfs File description
Author: Andy Fleming <afleming@freescale.com>
Updated: 2005-07-28
-SYSFS
-
-Several of the features of the gianfar driver are controlled
-through sysfs files. These are:
-
-bd_stash:
-To stash RX Buffer Descriptors in the L2, echo 'on' or '1' to
-bd_stash, echo 'off' or '0' to disable
-
-rx_stash_len:
-To stash the first n bytes of the packet in L2, echo the number
-of bytes to buf_stash_len. echo 0 to disable.
-
-WARNING: You could really screw these up if you set them too low or high!
-fifo_threshold:
-To change the number of bytes the controller needs in the
-fifo before it starts transmission, echo the number of bytes to
-fifo_thresh. Range should be 0-511.
-
-fifo_starve:
-When the FIFO has less than this many bytes during a transmit, it
-enters starve mode, and increases the priority of TX memory
-transactions. To change, echo the number of bytes to
-fifo_starve. Range should be 0-511.
-
-fifo_starve_off:
-Once in starve mode, the FIFO remains there until it has this
-many bytes. To change, echo the number of bytes to
-fifo_starve_off. Range should be 0-511.
CHECKSUM OFFLOADING
diff --git a/Documentation/networking/phy.txt b/Documentation/networking/phy.txt
index ebf2707..e602c6f 100644
--- a/Documentation/networking/phy.txt
+++ b/Documentation/networking/phy.txt
@@ -253,16 +253,25 @@ Writing a PHY driver
Each driver consists of a number of function pointers:
+ soft_reset: perform a PHY software reset
config_init: configures PHY into a sane state after a reset.
For instance, a Davicom PHY requires descrambling disabled.
probe: Allocate phy->priv, optionally refuse to bind.
PHY may not have been reset or had fixups run yet.
suspend/resume: power management
config_aneg: Changes the speed/duplex/negotiation settings
+ aneg_done: Determines the auto-negotiation result
read_status: Reads the current speed/duplex/negotiation settings
ack_interrupt: Clear a pending interrupt
+ did_interrupt: Checks if the PHY generated an interrupt
config_intr: Enable or disable interrupts
remove: Does any driver take-down
+ ts_info: Queries about the HW timestamping status
+ hwtstamp: Set the PHY HW timestamping configuration
+ rxtstamp: Requests a receive timestamp at the PHY level for a 'skb'
+ txtstamp: Requests a transmit timestamp at the PHY level for a 'skb'
+ set_wol: Enable Wake-on-LAN at the PHY level
+ get_wol: Get the Wake-on-LAN status at the PHY level
Of these, only config_aneg and read_status are required to be
assigned by the driver code. The rest are optional. Also, it is
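
To make the required/optional split concrete, here is a hypothetical minimal PHY driver; the IDs and name are made up, and the exported generic clause-22 helpers stand in for hardware-specific code:

    #include <linux/module.h>
    #include <linux/phy.h>

    /* Hypothetical driver, for illustration only: config_aneg and
     * read_status are the only mandatory callbacks, and the generic
     * helpers are usually sufficient for a standard clause-22 PHY.
     */
    static struct phy_driver demo_phy_driver = {
            .phy_id         = 0x00112233,           /* made-up OUI/model */
            .phy_id_mask    = 0xfffffff0,
            .name           = "Demo PHY",
            .features       = PHY_GBIT_FEATURES,
            .config_aneg    = genphy_config_aneg,
            .read_status    = genphy_read_status,
            .driver         = { .owner = THIS_MODULE },
    };

    static int __init demo_phy_init(void)
    {
            return phy_driver_register(&demo_phy_driver);
    }
    module_init(demo_phy_init);

    static void __exit demo_phy_exit(void)
    {
            phy_driver_unregister(&demo_phy_driver);
    }
    module_exit(demo_phy_exit);

    MODULE_LICENSE("GPL");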
diff --git a/Documentation/networking/pktgen.txt b/Documentation/networking/pktgen.txt
index 5a61a240..0e30c78 100644
--- a/Documentation/networking/pktgen.txt
+++ b/Documentation/networking/pktgen.txt
@@ -102,13 +102,18 @@ Examples:
The 'minimum' MAC is what you set with dstmac.
pgset "flag [name]" Set a flag to determine behaviour. Current flags
- are: IPSRC_RND #IP Source is random (between min/max),
- IPDST_RND, UDPSRC_RND,
- UDPDST_RND, MACSRC_RND, MACDST_RND
+ are: IPSRC_RND # IP source is random (between min/max)
+ IPDST_RND # IP destination is random
+ UDPSRC_RND, UDPDST_RND,
+ MACSRC_RND, MACDST_RND
+ TXSIZE_RND, IPV6,
MPLS_RND, VID_RND, SVID_RND
+ FLOW_SEQ,
QUEUE_MAP_RND # queue map random
QUEUE_MAP_CPU # queue map mirrors smp_processor_id()
- IPSEC # Make IPsec encapsulation for packet
+ UDPCSUM,
+ IPSEC # IPsec encapsulation (needs CONFIG_XFRM)
+ NODE_ALLOC # node specific memory allocation
pgset spi SPI_VALUE Set specific SA used to transform packet.
@@ -233,13 +238,22 @@ udp_dst_max
flag
IPSRC_RND
- TXSIZE_RND
IPDST_RND
UDPSRC_RND
UDPDST_RND
MACSRC_RND
MACDST_RND
+ TXSIZE_RND
+ IPV6
+ MPLS_RND
+ VID_RND
+ SVID_RND
+ FLOW_SEQ
+ QUEUE_MAP_RND
+ QUEUE_MAP_CPU
+ UDPCSUM
IPSEC
+ NODE_ALLOC
dst_min
dst_max
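
All of these flags, like every other pktgen knob, are set by writing plain-text commands to the per-device file under /proc/net/pktgen. A small user-space sketch of what the pgset shell helper does; the interface name eth0 is an assumption, and the device must already have been added to a pktgen thread:

    #include <stdio.h>

    /* Illustrative only: one command string per write, mirroring pgset. */
    static int pgset(const char *cmd)
    {
            FILE *f = fopen("/proc/net/pktgen/eth0", "w");

            if (!f)
                    return -1;
            fprintf(f, "%s\n", cmd);
            return fclose(f);
    }

    int main(void)
    {
            pgset("count 1000");
            pgset("flag UDPCSUM");          /* flags are set one at a time */
            pgset("flag IPDST_RND");
            return 0;
    }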
diff --git a/Documentation/networking/tcp.txt b/Documentation/networking/tcp.txt
index 7d11bb5..bdc4c0d 100644
--- a/Documentation/networking/tcp.txt
+++ b/Documentation/networking/tcp.txt
@@ -30,7 +30,7 @@ A congestion control mechanism can be registered through functions in
tcp_cong.c. The functions used by the congestion control mechanism are
registered via passing a tcp_congestion_ops struct to
tcp_register_congestion_control. As a minimum name, ssthresh,
-cong_avoid, min_cwnd must be valid.
+cong_avoid must be valid.
Private data for a congestion control mechanism is stored in tp->ca_priv.
tcp_ca(tp) returns a pointer to this space. This is preallocated space - it
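
With min_cwnd gone, a congestion-control module only has to provide name, ssthresh and cong_avoid. A hypothetical skeleton, using the exported Reno helpers as stand-ins (the module and ops names are made up):

    #include <linux/module.h>
    #include <net/tcp.h>

    static struct tcp_congestion_ops demo_cong_ops = {
            .name           = "demo",
            .owner          = THIS_MODULE,
            .ssthresh       = tcp_reno_ssthresh,    /* required callbacks */
            .cong_avoid     = tcp_reno_cong_avoid,
    };

    static int __init demo_cong_init(void)
    {
            return tcp_register_congestion_control(&demo_cong_ops);
    }
    module_init(demo_cong_init);

    static void __exit demo_cong_exit(void)
    {
            tcp_unregister_congestion_control(&demo_cong_ops);
    }
    module_exit(demo_cong_exit);

    MODULE_LICENSE("GPL");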
diff --git a/MAINTAINERS b/MAINTAINERS
index e1297ff..e4ee191 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1846,6 +1846,12 @@ L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/broadcom/b44.*
+BROADCOM GENET ETHERNET DRIVER
+M: Florian Fainelli <f.fainelli@gmail.com>
+L: netdev@vger.kernel.org
+S: Supported
+F: drivers/net/ethernet/broadcom/genet/
+
BROADCOM BNX2 GIGABIT ETHERNET DRIVER
M: Michael Chan <mchan@broadcom.com>
L: netdev@vger.kernel.org
@@ -6025,6 +6031,7 @@ L: netdev@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
S: Maintained
+F: net/core/flow.c
F: net/xfrm/
F: net/key/
F: net/ipv4/xfrm*
@@ -7053,13 +7060,8 @@ F: Documentation/networking/LICENSE.qla3xxx
F: drivers/net/ethernet/qlogic/qla3xxx.*
QLOGIC QLCNIC (1/10)Gb ETHERNET DRIVER
-M: Himanshu Madhani <himanshu.madhani@qlogic.com>
-M: Rajesh Borundia <rajesh.borundia@qlogic.com>
M: Shahed Shaikh <shahed.shaikh@qlogic.com>
-M: Jitendra Kalsaria <jitendra.kalsaria@qlogic.com>
-M: Sony Chacko <sony.chacko@qlogic.com>
-M: Sucheta Chakraborty <sucheta.chakraborty@qlogic.com>
-M: linux-driver@qlogic.com
+M: Dept-HSGLinuxNICDev@qlogic.com
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/qlogic/qlcnic/
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 62a7607..f1a9198 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -1925,7 +1925,7 @@ static int ucode_init(loader_block *lb, amb_dev *dev)
const struct firmware *fw;
unsigned long start_address;
const struct ihex_binrec *rec;
- const char *errmsg = 0;
+ const char *errmsg = NULL;
int res;
res = request_ihex_firmware(&fw, "atmsar11.fw", &dev->pci_dev->dev);
diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
index b41c948..f43e1c1 100644
--- a/drivers/atm/firestream.c
+++ b/drivers/atm/firestream.c
@@ -736,8 +736,8 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
skb = td->skb;
if (skb == FS_VCC (ATM_SKB(skb)->vcc)->last_skb) {
- wake_up_interruptible (& FS_VCC (ATM_SKB(skb)->vcc)->close_wait);
FS_VCC (ATM_SKB(skb)->vcc)->last_skb = NULL;
+ wake_up_interruptible (& FS_VCC (ATM_SKB(skb)->vcc)->close_wait);
}
td->dev->ntxpckts--;
@@ -1123,7 +1123,7 @@ static void fs_close(struct atm_vcc *atm_vcc)
this sleep_on, we'll lose any reference to these packets. Memory leak!
On the other hand, it's awfully convenient that we can abort a "close" that
is taking too long. Maybe just use non-interruptible sleep on? -- REW */
- interruptible_sleep_on (& vcc->close_wait);
+ wait_event_interruptible(vcc->close_wait, !vcc->last_skb);
}
txtp = &atm_vcc->qos.txtp;
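
The interruptible_sleep_on() conversions in this patch all follow the same shape: the waker sets the condition first and wakes second (hence the reordering in the hunk above), and the sleeper states the condition explicitly. A generic sketch with placeholder names, not taken from any of the drivers touched here:

    #include <linux/sched.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static bool demo_done;

    static void demo_complete(void)
    {
            demo_done = true;                       /* set the condition first... */
            wake_up_interruptible(&demo_wq);        /* ...then wake */
    }

    static int demo_wait(void)
    {
            /* interruptible_sleep_on() could miss a wakeup arriving between
             * the caller's own condition test and the sleep.  wait_event_*
             * queues the task and sets its state before testing the
             * condition, so a wakeup issued after the condition becomes
             * true is never lost.
             */
            return wait_event_interruptible(demo_wq, demo_done);
    }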
diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
index 9587e95..9988ac9 100644
--- a/drivers/atm/nicstar.c
+++ b/drivers/atm/nicstar.c
@@ -639,9 +639,9 @@ static int ns_init_card(int i, struct pci_dev *pcidev)
card->hbnr.init = NUM_HB;
card->hbnr.max = MAX_HB;
- card->sm_handle = 0x00000000;
+ card->sm_handle = NULL;
card->sm_addr = 0x00000000;
- card->lg_handle = 0x00000000;
+ card->lg_handle = NULL;
card->lg_addr = 0x00000000;
card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */
@@ -979,7 +979,7 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
addr2 = card->sm_addr;
handle2 = card->sm_handle;
card->sm_addr = 0x00000000;
- card->sm_handle = 0x00000000;
+ card->sm_handle = NULL;
} else { /* (!sm_addr) */
card->sm_addr = addr1;
@@ -993,7 +993,7 @@ static void push_rxbufs(ns_dev * card, struct sk_buff *skb)
addr2 = card->lg_addr;
handle2 = card->lg_handle;
card->lg_addr = 0x00000000;
- card->lg_handle = 0x00000000;
+ card->lg_handle = NULL;
} else { /* (!lg_addr) */
card->lg_addr = addr1;
@@ -1739,10 +1739,10 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
}
scq->full = 1;
- spin_unlock_irqrestore(&scq->lock, flags);
- interruptible_sleep_on_timeout(&scq->scqfull_waitq,
- SCQFULL_TIMEOUT);
- spin_lock_irqsave(&scq->lock, flags);
+ wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
+ scq->tail != scq->next,
+ scq->lock,
+ SCQFULL_TIMEOUT);
if (scq->full) {
spin_unlock_irqrestore(&scq->lock, flags);
@@ -1789,10 +1789,10 @@ static int push_scqe(ns_dev * card, vc_map * vc, scq_info * scq, ns_scqe * tbd,
scq->full = 1;
if (has_run++)
break;
- spin_unlock_irqrestore(&scq->lock, flags);
- interruptible_sleep_on_timeout(&scq->scqfull_waitq,
- SCQFULL_TIMEOUT);
- spin_lock_irqsave(&scq->lock, flags);
+ wait_event_interruptible_lock_irq_timeout(scq->scqfull_waitq,
+ scq->tail != scq->next,
+ scq->lock,
+ SCQFULL_TIMEOUT);
}
if (!scq->full) {
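
Where the sleeper already holds a spinlock, as in the two hunks above, wait_event_interruptible_lock_irq_timeout() replaces the unlock/sleep/relock sequence: the lock is dropped only while actually sleeping and is held again whenever the condition is evaluated and when the macro returns. A sketch with placeholder names:

    #include <linux/errno.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>
    #include <linux/wait.h>

    static DEFINE_SPINLOCK(demo_lock);
    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static bool demo_room;

    static int demo_wait_for_room(void)
    {
            long ret;

            spin_lock_irq(&demo_lock);
            ret = wait_event_interruptible_lock_irq_timeout(demo_wq, demo_room,
                                                            demo_lock, HZ);
            spin_unlock_irq(&demo_lock);

            if (ret < 0)
                    return ret;             /* interrupted by a signal */
            return ret ? 0 : -ETIMEDOUT;    /* 0 means the timeout elapsed */
    }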
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index e3fb496..943cf0d 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -760,7 +760,7 @@ static irqreturn_t solos_irq(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-void solos_bh(unsigned long card_arg)
+static void solos_bh(unsigned long card_arg)
{
struct solos_card *card = (void *)card_arg;
uint32_t card_flags;
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index a36749f..9b0ea0a 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -139,7 +139,6 @@ static int cn_call_callback(struct sk_buff *skb)
spin_unlock_bh(&dev->cbdev->queue_lock);
if (cbq != NULL) {
- err = 0;
cbq->callback(msg, nsp);
kfree_skb(skb);
cn_queue_release_callback(cbq);
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index fb4f1ba..1c5dc34 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -86,12 +86,13 @@ isdn_divert_read(struct file *file, char __user *buf, size_t count, loff_t *off)
struct divert_info *inf;
int len;
- if (!*((struct divert_info **) file->private_data)) {
+ if (!(inf = *((struct divert_info **) file->private_data))) {
if (file->f_flags & O_NONBLOCK)
return -EAGAIN;
- interruptible_sleep_on(&(rd_queue));
+ wait_event_interruptible(rd_queue, (inf =
+ *((struct divert_info **) file->private_data)));
}
- if (!(inf = *((struct divert_info **) file->private_data)))
+ if (!inf)
return (0);
inf->usage_cnt--; /* new usage count */
diff --git a/drivers/isdn/hisax/elsa.c b/drivers/isdn/hisax/elsa.c
index 2be1c8a..d8ef64d 100644
--- a/drivers/isdn/hisax/elsa.c
+++ b/drivers/isdn/hisax/elsa.c
@@ -509,7 +509,8 @@ static void
set_arcofi(struct IsdnCardState *cs, int bc) {
cs->dc.isac.arcofi_bc = bc;
arcofi_fsm(cs, ARCOFI_START, &ARCOFI_COP_5);
- interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
+ wait_event_interruptible(cs->dc.isac.arcofi_wait,
+ cs->dc.isac.arcofi_state == ARCOFI_NOP);
}
static int
@@ -528,7 +529,8 @@ check_arcofi(struct IsdnCardState *cs)
}
cs->dc.isac.arcofi_bc = 0;
arcofi_fsm(cs, ARCOFI_START, &ARCOFI_VERSION);
- interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
+ wait_event_interruptible(cs->dc.isac.arcofi_wait,
+ cs->dc.isac.arcofi_state == ARCOFI_NOP);
if (!test_and_clear_bit(FLG_ARCOFI_ERROR, &cs->HW_Flags)) {
debugl1(cs, "Arcofi response received %d bytes", cs->dc.isac.mon_rxp);
p = cs->dc.isac.mon_rx;
@@ -595,7 +597,8 @@ check_arcofi(struct IsdnCardState *cs)
Elsa_Types[cs->subtyp],
cs->hw.elsa.base + 8);
arcofi_fsm(cs, ARCOFI_START, &ARCOFI_XOP_0);
- interruptible_sleep_on(&cs->dc.isac.arcofi_wait);
+ wait_event_interruptible(cs->dc.isac.arcofi_wait,
+ cs->dc.isac.arcofi_state == ARCOFI_NOP);
return (1);
}
return (0);
diff --git a/drivers/isdn/hisax/elsa_ser.c b/drivers/isdn/hisax/elsa_ser.c
index 3f84dd8..a2a358c 100644
--- a/drivers/isdn/hisax/elsa_ser.c
+++ b/drivers/isdn/hisax/elsa_ser.c
@@ -573,7 +573,8 @@ modem_l2l1(struct PStack *st, int pr, void *arg)
test_and_clear_bit(BC_FLG_ACTIV, &bcs->Flag);
bcs->cs->dc.isac.arcofi_bc = st->l1.bc;
arcofi_fsm(bcs->cs, ARCOFI_START, &ARCOFI_XOP_0);
- interruptible_sleep_on(&bcs->cs->dc.isac.arcofi_wait);
+ wait_event_interruptible(bcs->cs->dc.isac.arcofi_wait,
+ bcs->cs->dc.isac.arcofi_state == ARCOFI_NOP);
bcs->cs->hw.elsa.MFlag = 1;
} else {
printk(KERN_WARNING "ElsaSer: unknown pr %x\n", pr);
diff --git a/drivers/isdn/hysdn/hysdn_proclog.c b/drivers/isdn/hysdn/hysdn_proclog.c
index b61e8d5..7b5fd8f 100644
--- a/drivers/isdn/hysdn/hysdn_proclog.c
+++ b/drivers/isdn/hysdn/hysdn_proclog.c
@@ -175,14 +175,15 @@ hysdn_log_read(struct file *file, char __user *buf, size_t count, loff_t *off)
int len;
hysdn_card *card = PDE_DATA(file_inode(file));
- if (!*((struct log_data **) file->private_data)) {
+ if (!(inf = *((struct log_data **) file->private_data))) {
struct procdata *pd = card->proclog;
if (file->f_flags & O_NONBLOCK)
return (-EAGAIN);
- interruptible_sleep_on(&(pd->rd_queue));
+ wait_event_interruptible(pd->rd_queue, (inf =
+ *((struct log_data **) file->private_data)));
}
- if (!(inf = *((struct log_data **) file->private_data)))
+ if (!inf)
return (0);
inf->usage_cnt--; /* new usage count */
diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
index 9bb12ba..130f216 100644
--- a/drivers/isdn/i4l/isdn_common.c
+++ b/drivers/isdn/i4l/isdn_common.c
@@ -777,7 +777,8 @@ isdn_readbchan(int di, int channel, u_char *buf, u_char *fp, int len, wait_queue
return 0;
if (skb_queue_empty(&dev->drv[di]->rpqueue[channel])) {
if (sleep)
- interruptible_sleep_on(sleep);
+ wait_event_interruptible(*sleep,
+ !skb_queue_empty(&dev->drv[di]->rpqueue[channel]));
else
return 0;
}
@@ -1072,7 +1073,8 @@ isdn_read(struct file *file, char __user *buf, size_t count, loff_t *off)
retval = -EAGAIN;
goto out;
}
- interruptible_sleep_on(&(dev->info_waitq));
+ wait_event_interruptible(dev->info_waitq,
+ file->private_data);
}
p = isdn_statstr();
file->private_data = NULL;
@@ -1128,7 +1130,8 @@ isdn_read(struct file *file, char __user *buf, size_t count, loff_t *off)
retval = -EAGAIN;
goto out;
}
- interruptible_sleep_on(&(dev->drv[drvidx]->st_waitq));
+ wait_event_interruptible(dev->drv[drvidx]->st_waitq,
+ dev->drv[drvidx]->stavail);
}
if (dev->drv[drvidx]->interface->readstat) {
if (count > dev->drv[drvidx]->stavail)
@@ -1188,8 +1191,8 @@ isdn_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
goto out;
}
chidx = isdn_minor2chan(minor);
- while ((retval = isdn_writebuf_stub(drvidx, chidx, buf, count)) == 0)
- interruptible_sleep_on(&dev->drv[drvidx]->snd_waitq[chidx]);
+ wait_event_interruptible(dev->drv[drvidx]->snd_waitq[chidx],
+ (retval = isdn_writebuf_stub(drvidx, chidx, buf, count)));
goto out;
}
if (minor <= ISDN_MINOR_CTRLMAX) {
diff --git a/drivers/isdn/pcbit/drv.c b/drivers/isdn/pcbit/drv.c
index 1eaf622..f02cc50 100644
--- a/drivers/isdn/pcbit/drv.c
+++ b/drivers/isdn/pcbit/drv.c
@@ -796,6 +796,7 @@ static void set_running_timeout(unsigned long ptr)
#endif
dev = (struct pcbit_dev *) ptr;
+ dev->l2_state = L2_DOWN;
wake_up_interruptible(&dev->set_running_wq);
}
@@ -818,7 +819,8 @@ static int set_protocol_running(struct pcbit_dev *dev)
add_timer(&dev->set_running_timer);
- interruptible_sleep_on(&dev->set_running_wq);
+ wait_event(dev->set_running_wq, dev->l2_state == L2_RUNNING ||
+ dev->l2_state == L2_DOWN);
del_timer(&dev->set_running_timer);
@@ -842,8 +844,6 @@ static int set_protocol_running(struct pcbit_dev *dev)
printk(KERN_DEBUG "pcbit: initialization failed\n");
printk(KERN_DEBUG "pcbit: firmware not loaded\n");
- dev->l2_state = L2_DOWN;
-
#ifdef DEBUG
printk(KERN_DEBUG "Bank3 = %02x\n",
readb(dev->sh_mem + BANK3));
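
The pcbit change combines a plain wait_event() with a timer whose handler drives the state machine to a terminal value before waking, so the wait ends either on success or on timeout. A rough sketch of the same idea with placeholder names; here wake_up() is paired with the uninterruptible wait:

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/timer.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(demo_wq);
    static struct timer_list demo_timer;
    static int demo_state;          /* 0 = pending, 1 = running, -1 = gave up */

    static void demo_success(void)
    {
            demo_state = 1;
            wake_up(&demo_wq);
    }

    static void demo_timeout(unsigned long data)
    {
            demo_state = -1;        /* reach a terminal state *before* waking */
            wake_up(&demo_wq);
    }

    static int demo_wait_running(void)
    {
            setup_timer(&demo_timer, demo_timeout, 0);
            mod_timer(&demo_timer, jiffies + 2 * HZ);

            wait_event(demo_wq, demo_state != 0);

            del_timer_sync(&demo_timer);
            return demo_state == 1 ? 0 : -ETIMEDOUT;
    }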
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index dcde5605..a2ef3f7 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -768,11 +768,11 @@ static int ad_lacpdu_send(struct port *port)
lacpdu_header = (struct lacpdu_header *)skb_put(skb, length);
- memcpy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
+ ether_addr_copy(lacpdu_header->hdr.h_dest, lacpdu_mcast_addr);
/* Note: source address is set to be the member's PERMANENT address,
* because we use it to identify loopback lacpdus in receive.
*/
- memcpy(lacpdu_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(lacpdu_header->hdr.h_source, slave->perm_hwaddr);
lacpdu_header->hdr.h_proto = PKT_TYPE_LACPDU;
lacpdu_header->lacpdu = port->lacpdu;
@@ -810,11 +810,11 @@ static int ad_marker_send(struct port *port, struct bond_marker *marker)
marker_header = (struct bond_marker_header *)skb_put(skb, length);
- memcpy(marker_header->hdr.h_dest, lacpdu_mcast_addr, ETH_ALEN);
+ ether_addr_copy(marker_header->hdr.h_dest, lacpdu_mcast_addr);
/* Note: source address is set to be the member's PERMANENT address,
* because we use it to identify loopback MARKERs in receive.
*/
- memcpy(marker_header->hdr.h_source, slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(marker_header->hdr.h_source, slave->perm_hwaddr);
marker_header->hdr.h_proto = PKT_TYPE_LACPDU;
marker_header->marker = *marker;
@@ -1079,7 +1079,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
/* detect loopback situation */
if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system),
&(port->actor_system))) {
- pr_err("%s: An illegal loopback occurred on adapter (%s).\nCheck the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
+ pr_err("%s: An illegal loopback occurred on adapter (%s)\n"
+ "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
port->slave->bond->dev->name,
port->slave->dev->name);
return;
@@ -1948,7 +1949,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
* new aggregator
*/
if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
- pr_debug("Some port(s) related to LAG %d - replaceing with LAG %d\n",
+ pr_debug("Some port(s) related to LAG %d - replacing with LAG %d\n",
aggregator->aggregator_identifier,
new_aggregator->aggregator_identifier);
@@ -2310,9 +2311,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port->actor_oper_port_key = (port->actor_admin_port_key &=
~AD_SPEED_KEY_BITS);
}
- pr_debug("Port %d changed link status to %s",
- port->actor_port_number,
- (link == BOND_LINK_UP) ? "UP" : "DOWN");
+ pr_debug("Port %d changed link status to %s\n",
+ port->actor_port_number,
+ link == BOND_LINK_UP ? "UP" : "DOWN");
/* there is no need to reselect a new aggregator, just signal the
* state machines to reinitialize
*/
@@ -2390,17 +2391,16 @@ int __bond_3ad_get_active_agg_info(struct bonding *bond,
}
}
- if (aggregator) {
- ad_info->aggregator_id = aggregator->aggregator_identifier;
- ad_info->ports = aggregator->num_of_ports;
- ad_info->actor_key = aggregator->actor_oper_aggregator_key;
- ad_info->partner_key = aggregator->partner_oper_aggregator_key;
- memcpy(ad_info->partner_system,
- aggregator->partner_system.mac_addr_value, ETH_ALEN);
- return 0;
- }
+ if (!aggregator)
+ return -1;
- return -1;
+ ad_info->aggregator_id = aggregator->aggregator_identifier;
+ ad_info->ports = aggregator->num_of_ports;
+ ad_info->actor_key = aggregator->actor_oper_aggregator_key;
+ ad_info->partner_key = aggregator->partner_oper_aggregator_key;
+ ether_addr_copy(ad_info->partner_system,
+ aggregator->partner_system.mac_addr_value);
+ return 0;
}
/* Wrapper used to hold bond->lock so no slave manipulation can occur */
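
The memcpy()-to-ether_addr_copy() conversions above rely on one documented requirement: both addresses must be at least 16-bit aligned, which lets the helper use a few 16/32-bit accesses instead of a generic memcpy(). A small illustration with a hypothetical structure, not taken from the bonding code:

    #include <linux/etherdevice.h>
    #include <linux/types.h>

    struct demo_entry {
            u8 mac_dst[ETH_ALEN] __aligned(2);      /* alignment made explicit */
            u8 mac_src[ETH_ALEN] __aligned(2);
    };

    static void demo_learn_peer(struct demo_entry *e,
                                const u8 *dst, const u8 *src)
    {
            /* Copies exactly ETH_ALEN (6) bytes, like the memcpy() calls it
             * replaces; the conversions above assume the bonding fields are
             * already suitably aligned.
             */
            ether_addr_copy(e->mac_dst, dst);
            ether_addr_copy(e->mac_src, src);
    }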
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index f4dd959..bb03b1d 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -28,7 +28,7 @@
#include <linux/netdevice.h>
#include <linux/if_ether.h>
-// General definitions
+/* General definitions */
#define PKT_TYPE_LACPDU cpu_to_be16(ETH_P_SLOW)
#define AD_TIMER_INTERVAL 100 /*msec*/
@@ -47,54 +47,54 @@ enum {
BOND_AD_COUNT = 2,
};
-// rx machine states(43.4.11 in the 802.3ad standard)
+/* rx machine states(43.4.11 in the 802.3ad standard) */
typedef enum {
AD_RX_DUMMY,
- AD_RX_INITIALIZE, // rx Machine
- AD_RX_PORT_DISABLED, // rx Machine
- AD_RX_LACP_DISABLED, // rx Machine
- AD_RX_EXPIRED, // rx Machine
- AD_RX_DEFAULTED, // rx Machine
- AD_RX_CURRENT // rx Machine
+ AD_RX_INITIALIZE, /* rx Machine */
+ AD_RX_PORT_DISABLED, /* rx Machine */
+ AD_RX_LACP_DISABLED, /* rx Machine */
+ AD_RX_EXPIRED, /* rx Machine */
+ AD_RX_DEFAULTED, /* rx Machine */
+ AD_RX_CURRENT /* rx Machine */
} rx_states_t;
-// periodic machine states(43.4.12 in the 802.3ad standard)
+/* periodic machine states(43.4.12 in the 802.3ad standard) */
typedef enum {
AD_PERIODIC_DUMMY,
- AD_NO_PERIODIC, // periodic machine
- AD_FAST_PERIODIC, // periodic machine
- AD_SLOW_PERIODIC, // periodic machine
- AD_PERIODIC_TX // periodic machine
+ AD_NO_PERIODIC, /* periodic machine */
+ AD_FAST_PERIODIC, /* periodic machine */
+ AD_SLOW_PERIODIC, /* periodic machine */
+ AD_PERIODIC_TX /* periodic machine */
} periodic_states_t;
-// mux machine states(43.4.13 in the 802.3ad standard)
+/* mux machine states(43.4.13 in the 802.3ad standard) */
typedef enum {
AD_MUX_DUMMY,
- AD_MUX_DETACHED, // mux machine
- AD_MUX_WAITING, // mux machine
- AD_MUX_ATTACHED, // mux machine
- AD_MUX_COLLECTING_DISTRIBUTING // mux machine
+ AD_MUX_DETACHED, /* mux machine */
+ AD_MUX_WAITING, /* mux machine */
+ AD_MUX_ATTACHED, /* mux machine */
+ AD_MUX_COLLECTING_DISTRIBUTING /* mux machine */
} mux_states_t;
-// tx machine states(43.4.15 in the 802.3ad standard)
+/* tx machine states(43.4.15 in the 802.3ad standard) */
typedef enum {
AD_TX_DUMMY,
- AD_TRANSMIT // tx Machine
+ AD_TRANSMIT /* tx Machine */
} tx_states_t;
-// rx indication types
+/* rx indication types */
typedef enum {
- AD_TYPE_LACPDU = 1, // type lacpdu
- AD_TYPE_MARKER // type marker
+ AD_TYPE_LACPDU = 1, /* type lacpdu */
+ AD_TYPE_MARKER /* type marker */
} pdu_type_t;
-// rx marker indication types
+/* rx marker indication types */
typedef enum {
- AD_MARKER_INFORMATION_SUBTYPE = 1, // marker imformation subtype
- AD_MARKER_RESPONSE_SUBTYPE // marker response subtype
+ AD_MARKER_INFORMATION_SUBTYPE = 1, /* marker information subtype */
+ AD_MARKER_RESPONSE_SUBTYPE /* marker response subtype */
} bond_marker_subtype_t;
-// timers types(43.4.9 in the 802.3ad standard)
+/* timers types(43.4.9 in the 802.3ad standard) */
typedef enum {
AD_CURRENT_WHILE_TIMER,
AD_ACTOR_CHURN_TIMER,
@@ -105,35 +105,35 @@ typedef enum {
#pragma pack(1)
-// Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard)
+/* Link Aggregation Control Protocol(LACP) data unit structure(43.4.2.2 in the 802.3ad standard) */
typedef struct lacpdu {
- u8 subtype; // = LACP(= 0x01)
+ u8 subtype; /* = LACP(= 0x01) */
u8 version_number;
- u8 tlv_type_actor_info; // = actor information(type/length/value)
- u8 actor_information_length; // = 20
+ u8 tlv_type_actor_info; /* = actor information(type/length/value) */
+ u8 actor_information_length; /* = 20 */
__be16 actor_system_priority;
struct mac_addr actor_system;
__be16 actor_key;
__be16 actor_port_priority;
__be16 actor_port;
u8 actor_state;
- u8 reserved_3_1[3]; // = 0
- u8 tlv_type_partner_info; // = partner information
- u8 partner_information_length; // = 20
+ u8 reserved_3_1[3]; /* = 0 */
+ u8 tlv_type_partner_info; /* = partner information */
+ u8 partner_information_length; /* = 20 */
__be16 partner_system_priority;
struct mac_addr partner_system;
__be16 partner_key;
__be16 partner_port_priority;
__be16 partner_port;
u8 partner_state;
- u8 reserved_3_2[3]; // = 0
- u8 tlv_type_collector_info; // = collector information
- u8 collector_information_length; // = 16
+ u8 reserved_3_2[3]; /* = 0 */
+ u8 tlv_type_collector_info; /* = collector information */
+ u8 collector_information_length;/* = 16 */
__be16 collector_max_delay;
u8 reserved_12[12];
- u8 tlv_type_terminator; // = terminator
- u8 terminator_length; // = 0
- u8 reserved_50[50]; // = 0
+ u8 tlv_type_terminator; /* = terminator */
+ u8 terminator_length; /* = 0 */
+ u8 reserved_50[50]; /* = 0 */
} __packed lacpdu_t;
typedef struct lacpdu_header {
@@ -141,20 +141,20 @@ typedef struct lacpdu_header {
struct lacpdu lacpdu;
} __packed lacpdu_header_t;
-// Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard)
+/* Marker Protocol Data Unit(PDU) structure(43.5.3.2 in the 802.3ad standard) */
typedef struct bond_marker {
- u8 subtype; // = 0x02 (marker PDU)
- u8 version_number; // = 0x01
- u8 tlv_type; // = 0x01 (marker information)
- // = 0x02 (marker response information)
- u8 marker_length; // = 0x16
- u16 requester_port; // The number assigned to the port by the requester
- struct mac_addr requester_system; // The requester's system id
- u32 requester_transaction_id; // The transaction id allocated by the requester,
- u16 pad; // = 0
- u8 tlv_type_terminator; // = 0x00
- u8 terminator_length; // = 0x00
- u8 reserved_90[90]; // = 0
+ u8 subtype; /* = 0x02 (marker PDU) */
+ u8 version_number; /* = 0x01 */
+ u8 tlv_type; /* = 0x01 (marker information) */
+ /* = 0x02 (marker response information) */
+ u8 marker_length; /* = 0x16 */
+ u16 requester_port; /* The number assigned to the port by the requester */
+ struct mac_addr requester_system; /* The requester's system id */
+ u32 requester_transaction_id; /* The transaction id allocated by the requester, */
+ u16 pad; /* = 0 */
+ u8 tlv_type_terminator; /* = 0x00 */
+ u8 terminator_length; /* = 0x00 */
+ u8 reserved_90[90]; /* = 0 */
} __packed bond_marker_t;
typedef struct bond_marker_header {
@@ -173,7 +173,7 @@ struct port;
#pragma pack(8)
#endif
-// aggregator structure(43.4.5 in the 802.3ad standard)
+/* aggregator structure(43.4.5 in the 802.3ad standard) */
typedef struct aggregator {
struct mac_addr aggregator_mac_address;
u16 aggregator_identifier;
@@ -183,12 +183,12 @@ typedef struct aggregator {
struct mac_addr partner_system;
u16 partner_system_priority;
u16 partner_oper_aggregator_key;
- u16 receive_state; // BOOLEAN
- u16 transmit_state; // BOOLEAN
+ u16 receive_state; /* BOOLEAN */
+ u16 transmit_state; /* BOOLEAN */
struct port *lag_ports;
- // ****** PRIVATE PARAMETERS ******
- struct slave *slave; // pointer to the bond slave that this aggregator belongs to
- u16 is_active; // BOOLEAN. Indicates if this aggregator is active
+ /* ****** PRIVATE PARAMETERS ****** */
+ struct slave *slave; /* pointer to the bond slave that this aggregator belongs to */
+ u16 is_active; /* BOOLEAN. Indicates if this aggregator is active */
u16 num_of_ports;
} aggregator_t;
@@ -201,12 +201,12 @@ struct port_params {
u16 port_state;
};
-// port structure(43.4.6 in the 802.3ad standard)
+/* port structure(43.4.6 in the 802.3ad standard) */
typedef struct port {
u16 actor_port_number;
u16 actor_port_priority;
- struct mac_addr actor_system; // This parameter is added here although it is not specified in the standard, just for simplification
- u16 actor_system_priority; // This parameter is added here although it is not specified in the standard, just for simplification
+ struct mac_addr actor_system; /* This parameter is added here although it is not specified in the standard, just for simplification */
+ u16 actor_system_priority; /* This parameter is added here although it is not specified in the standard, just for simplification */
u16 actor_port_aggregator_identifier;
bool ntt;
u16 actor_admin_port_key;
@@ -219,24 +219,24 @@ typedef struct port {
bool is_enabled;
- // ****** PRIVATE PARAMETERS ******
- u16 sm_vars; // all state machines variables for this port
- rx_states_t sm_rx_state; // state machine rx state
- u16 sm_rx_timer_counter; // state machine rx timer counter
- periodic_states_t sm_periodic_state;// state machine periodic state
- u16 sm_periodic_timer_counter; // state machine periodic timer counter
- mux_states_t sm_mux_state; // state machine mux state
- u16 sm_mux_timer_counter; // state machine mux timer counter
- tx_states_t sm_tx_state; // state machine tx state
- u16 sm_tx_timer_counter; // state machine tx timer counter(allways on - enter to transmit state 3 time per second)
- struct slave *slave; // pointer to the bond slave that this port belongs to
- struct aggregator *aggregator; // pointer to an aggregator that this port related to
- struct port *next_port_in_aggregator; // Next port on the linked list of the parent aggregator
- u32 transaction_id; // continuous number for identification of Marker PDU's;
- struct lacpdu lacpdu; // the lacpdu that will be sent for this port
+ /* ****** PRIVATE PARAMETERS ****** */
+ u16 sm_vars; /* all state machines variables for this port */
+ rx_states_t sm_rx_state; /* state machine rx state */
+ u16 sm_rx_timer_counter; /* state machine rx timer counter */
+ periodic_states_t sm_periodic_state; /* state machine periodic state */
+ u16 sm_periodic_timer_counter; /* state machine periodic timer counter */
+ mux_states_t sm_mux_state; /* state machine mux state */
+ u16 sm_mux_timer_counter; /* state machine mux timer counter */
+ tx_states_t sm_tx_state; /* state machine tx state */
+ u16 sm_tx_timer_counter; /* state machine tx timer counter (always on - enters transmit state 3 times per second) */
+ struct slave *slave; /* pointer to the bond slave that this port belongs to */
+ struct aggregator *aggregator; /* pointer to an aggregator that this port related to */
+ struct port *next_port_in_aggregator; /* Next port on the linked list of the parent aggregator */
+ u32 transaction_id; /* continuous number for identification of Marker PDU's; */
+ struct lacpdu lacpdu; /* the lacpdu that will be sent for this port */
} port_t;
-// system structure
+/* system structure */
struct ad_system {
u16 sys_priority;
struct mac_addr sys_mac_addr;
@@ -246,27 +246,26 @@ struct ad_system {
#pragma pack()
#endif
-// ================= AD Exported structures to the main bonding code ==================
+/* ========== AD Exported structures to the main bonding code ========== */
#define BOND_AD_INFO(bond) ((bond)->ad_info)
#define SLAVE_AD_INFO(slave) ((slave)->ad_info)
struct ad_bond_info {
- struct ad_system system; /* 802.3ad system structure */
- u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
+ struct ad_system system; /* 802.3ad system structure */
+ u32 agg_select_timer; /* Timer to select aggregator after all adapter's hand shakes */
u16 aggregator_identifier;
};
struct ad_slave_info {
- struct aggregator aggregator; // 802.3ad aggregator structure
- struct port port; // 802.3ad port structure
- spinlock_t state_machine_lock; /* mutex state machines vs.
- incoming LACPDU */
+ struct aggregator aggregator; /* 802.3ad aggregator structure */
+ struct port port; /* 802.3ad port structure */
+ spinlock_t state_machine_lock; /* mutex state machines vs. incoming LACPDU */
u16 id;
};
-// ================= AD Exported functions to the main bonding code ==================
+/* ========== AD Exported functions to the main bonding code ========== */
void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution);
-void bond_3ad_bind_slave(struct slave *slave);
+void bond_3ad_bind_slave(struct slave *slave);
void bond_3ad_unbind_slave(struct slave *slave);
void bond_3ad_state_machine_handler(struct work_struct *);
void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout);
@@ -281,5 +280,5 @@ int bond_3ad_lacpdu_recv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave);
int bond_3ad_set_carrier(struct bonding *bond);
void bond_3ad_update_lacp_rate(struct bonding *bond);
-#endif //__BOND_3AD_H__
+#endif /* __BOND_3AD_H__ */
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index a2c4747..aaeeacf 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -93,9 +93,8 @@ static inline u8 _simple_hash(const u8 *hash_start, int hash_size)
int i;
u8 hash = 0;
- for (i = 0; i < hash_size; i++) {
+ for (i = 0; i < hash_size; i++)
hash ^= hash_start[i];
- }
return hash;
}
@@ -190,9 +189,8 @@ static int tlb_initialize(struct bonding *bond)
bond_info->tx_hashtbl = new_hashtbl;
- for (i = 0; i < TLB_HASH_TABLE_SIZE; i++) {
+ for (i = 0; i < TLB_HASH_TABLE_SIZE; i++)
tlb_init_table_entry(&bond_info->tx_hashtbl[i], 0);
- }
_unlock_tx_hashtbl_bh(bond);
@@ -264,9 +262,8 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
hash_table[hash_index].next = next_index;
hash_table[hash_index].prev = TLB_NULL_INDEX;
- if (next_index != TLB_NULL_INDEX) {
+ if (next_index != TLB_NULL_INDEX)
hash_table[next_index].prev = hash_index;
- }
slave_info->head = hash_index;
slave_info->load +=
@@ -274,9 +271,8 @@ static struct slave *__tlb_choose_channel(struct bonding *bond, u32 hash_index,
}
}
- if (assigned_slave) {
+ if (assigned_slave)
hash_table[hash_index].tx_bytes += skb_len;
- }
return assigned_slave;
}
@@ -329,7 +325,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
_lock_rx_hashtbl_bh(bond);
- hash_index = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
+ hash_index = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
client_info = &(bond_info->rx_hashtbl[hash_index]);
if ((client_info->assigned) &&
@@ -337,7 +333,7 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
(client_info->ip_dst == arp->ip_src) &&
(!ether_addr_equal_64bits(client_info->mac_dst, arp->mac_src))) {
/* update the clients MAC address */
- memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
+ ether_addr_copy(client_info->mac_dst, arp->mac_src);
client_info->ntt = 1;
bond_info->rx_ntt = 1;
}
@@ -451,9 +447,8 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
*/
static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
{
- if (!bond->curr_active_slave) {
+ if (!bond->curr_active_slave)
return;
- }
if (!bond->alb_info.primary_is_promisc) {
if (!dev_set_promiscuity(bond->curr_active_slave->dev, 1))
@@ -513,9 +508,8 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
write_lock_bh(&bond->curr_slave_lock);
- if (slave != bond->curr_active_slave) {
+ if (slave != bond->curr_active_slave)
rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
- }
write_unlock_bh(&bond->curr_slave_lock);
}
@@ -524,9 +518,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
{
int i;
- if (!client_info->slave) {
+ if (!client_info->slave)
return;
- }
for (i = 0; i < RLB_ARP_BURST_SIZE; i++) {
struct sk_buff *skb;
@@ -574,9 +567,8 @@ static void rlb_update_rx_clients(struct bonding *bond)
client_info = &(bond_info->rx_hashtbl[hash_index]);
if (client_info->ntt) {
rlb_update_client(client_info);
- if (bond_info->rlb_update_retry_counter == 0) {
+ if (bond_info->rlb_update_retry_counter == 0)
client_info->ntt = 0;
- }
}
}
@@ -610,10 +602,10 @@ static void rlb_req_update_slave_clients(struct bonding *bond, struct slave *sla
}
}
- // update the team's flag only after the whole iteration
+ /* update the team's flag only after the whole iteration */
if (ntt) {
bond_info->rx_ntt = 1;
- //fasten the change
+ /* fasten the change */
bond_info->rlb_update_retry_counter = RLB_UPDATE_RETRY;
}
@@ -677,9 +669,9 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
/* the entry is already assigned to this client */
if (!ether_addr_equal_64bits(arp->mac_dst, mac_bcast)) {
/* update mac address from arp */
- memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
+ ether_addr_copy(client_info->mac_dst, arp->mac_dst);
}
- memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
+ ether_addr_copy(client_info->mac_src, arp->mac_src);
assigned_slave = client_info->slave;
if (assigned_slave) {
@@ -719,8 +711,8 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
* will be updated with clients actual unicast mac address
* upon receiving an arp reply.
*/
- memcpy(client_info->mac_dst, arp->mac_dst, ETH_ALEN);
- memcpy(client_info->mac_src, arp->mac_src, ETH_ALEN);
+ ether_addr_copy(client_info->mac_dst, arp->mac_dst);
+ ether_addr_copy(client_info->mac_src, arp->mac_src);
client_info->slave = assigned_slave;
if (!ether_addr_equal_64bits(client_info->mac_dst, mac_bcast)) {
@@ -770,9 +762,8 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
* rx channel
*/
tx_slave = rlb_choose_channel(skb, bond);
- if (tx_slave) {
- memcpy(arp->mac_src,tx_slave->dev->dev_addr, ETH_ALEN);
- }
+ if (tx_slave)
+ ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
pr_debug("Server sent ARP Reply packet\n");
} else if (arp->op_code == htons(ARPOP_REQUEST)) {
/* Create an entry in the rx_hashtbl for this client as a
@@ -824,9 +815,8 @@ static void rlb_rebalance(struct bonding *bond)
}
/* update the team's flag only after the whole iteration */
- if (ntt) {
+ if (ntt)
bond_info->rx_ntt = 1;
- }
_unlock_rx_hashtbl_bh(bond);
}
@@ -923,7 +913,7 @@ static void rlb_src_link(struct bonding *bond, u32 ip_src_hash, u32 ip_dst_hash)
static void rlb_purge_src_ip(struct bonding *bond, struct arp_pkt *arp)
{
struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
- u32 ip_src_hash = _simple_hash((u8*)&(arp->ip_src), sizeof(arp->ip_src));
+ u32 ip_src_hash = _simple_hash((u8 *)&(arp->ip_src), sizeof(arp->ip_src));
u32 index;
_lock_rx_hashtbl_bh(bond);
@@ -957,9 +947,8 @@ static int rlb_initialize(struct bonding *bond)
bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
- for (i = 0; i < RLB_HASH_TABLE_SIZE; i++) {
+ for (i = 0; i < RLB_HASH_TABLE_SIZE; i++)
rlb_init_table_entry(bond_info->rx_hashtbl + i);
- }
_unlock_rx_hashtbl_bh(bond);
@@ -1014,8 +1003,8 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
char *data;
memset(&pkt, 0, size);
- memcpy(pkt.mac_dst, mac_addr, ETH_ALEN);
- memcpy(pkt.mac_src, mac_addr, ETH_ALEN);
+ ether_addr_copy(pkt.mac_dst, mac_addr);
+ ether_addr_copy(pkt.mac_src, mac_addr);
pkt.type = cpu_to_be16(ETH_P_LOOP);
skb = dev_alloc_skb(size);
@@ -1097,7 +1086,7 @@ static void alb_swap_mac_addr(struct slave *slave1, struct slave *slave2)
{
u8 tmp_mac_addr[ETH_ALEN];
- memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_mac_addr, slave1->dev->dev_addr);
alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
alb_set_slave_mac_addr(slave2, tmp_mac_addr);
@@ -1254,9 +1243,9 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
if (free_mac_slave) {
alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
- pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
- bond->dev->name, slave->dev->name,
- free_mac_slave->dev->name);
+ pr_warn("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
+ bond->dev->name, slave->dev->name,
+ free_mac_slave->dev->name);
} else if (has_bond_addr) {
pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
@@ -1294,12 +1283,12 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
bond_for_each_slave(bond, slave, iter) {
/* save net_device's current hw address */
- memcpy(tmp_addr, slave->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_addr, slave->dev->dev_addr);
res = dev_set_mac_address(slave->dev, addr);
/* restore net_device's hw address */
- memcpy(slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ ether_addr_copy(slave->dev->dev_addr, tmp_addr);
if (res)
goto unwind;
@@ -1315,9 +1304,9 @@ unwind:
bond_for_each_slave(bond, rollback_slave, iter) {
if (rollback_slave == slave)
break;
- memcpy(tmp_addr, rollback_slave->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_addr, rollback_slave->dev->dev_addr);
dev_set_mac_address(rollback_slave->dev, &sa);
- memcpy(rollback_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ ether_addr_copy(rollback_slave->dev->dev_addr, tmp_addr);
}
return res;
@@ -1330,9 +1319,8 @@ int bond_alb_initialize(struct bonding *bond, int rlb_enabled)
int res;
res = tlb_initialize(bond);
- if (res) {
+ if (res)
return res;
- }
if (rlb_enabled) {
bond->alb_info.rlb_enabled = 1;
@@ -1355,9 +1343,8 @@ void bond_alb_deinitialize(struct bonding *bond)
tlb_deinitialize(bond);
- if (bond_info->rlb_enabled) {
+ if (bond_info->rlb_enabled)
rlb_deinitialize(bond);
- }
}
int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
@@ -1436,14 +1423,13 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
break;
}
- hash_start = (char*)eth_data->h_dest;
+ hash_start = (char *)eth_data->h_dest;
hash_size = ETH_ALEN;
break;
case ETH_P_ARP:
do_tx_balance = 0;
- if (bond_info->rlb_enabled) {
+ if (bond_info->rlb_enabled)
tx_slave = rlb_arp_xmit(skb, bond);
- }
break;
default:
do_tx_balance = 0;
@@ -1463,19 +1449,18 @@ int bond_alb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
if (tx_slave && SLAVE_IS_OK(tx_slave)) {
if (tx_slave != rcu_dereference(bond->curr_active_slave)) {
- memcpy(eth_data->h_source,
- tx_slave->dev->dev_addr,
- ETH_ALEN);
+ ether_addr_copy(eth_data->h_source,
+ tx_slave->dev->dev_addr);
}
bond_dev_queue_xmit(bond, skb, tx_slave->dev);
goto out;
- } else {
- if (tx_slave) {
- _lock_tx_hashtbl(bond);
- __tlb_clear_slave(bond, tx_slave, 0);
- _unlock_tx_hashtbl(bond);
- }
+ }
+
+ if (tx_slave) {
+ _lock_tx_hashtbl(bond);
+ __tlb_clear_slave(bond, tx_slave, 0);
+ _unlock_tx_hashtbl(bond);
}
/* no suitable interface, frame not sent */
@@ -1577,11 +1562,10 @@ void bond_alb_monitor(struct work_struct *work)
--bond_info->rlb_update_delay_counter;
} else {
rlb_update_rx_clients(bond);
- if (bond_info->rlb_update_retry_counter) {
+ if (bond_info->rlb_update_retry_counter)
--bond_info->rlb_update_retry_counter;
- } else {
+ else
bond_info->rx_ntt = 0;
- }
}
}
}
@@ -1598,23 +1582,20 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
int res;
res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
- if (res) {
+ if (res)
return res;
- }
res = alb_handle_addr_collision_on_attach(bond, slave);
- if (res) {
+ if (res)
return res;
- }
tlb_init_slave(slave);
/* order a rebalance ASAP */
bond->alb_info.tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
bond->alb_info.rlb_rebalance = 1;
- }
return 0;
}
@@ -1645,9 +1626,8 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
if (link == BOND_LINK_DOWN) {
tlb_clear_slave(bond, slave, 0);
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
rlb_clear_slave(bond, slave);
- }
} else if (link == BOND_LINK_UP) {
/* order a rebalance ASAP */
bond_info->tx_rebalance_counter = BOND_TLB_REBALANCE_TICKS;
@@ -1723,14 +1703,14 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
struct sockaddr sa;
u8 tmp_addr[ETH_ALEN];
- memcpy(tmp_addr, new_slave->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(tmp_addr, new_slave->dev->dev_addr);
memcpy(sa.sa_data, bond->dev->dev_addr, bond->dev->addr_len);
sa.sa_family = bond->dev->type;
/* we don't care if it can't change its mac, best effort */
dev_set_mac_address(new_slave->dev, &sa);
- memcpy(new_slave->dev->dev_addr, tmp_addr, ETH_ALEN);
+ ether_addr_copy(new_slave->dev->dev_addr, tmp_addr);
}
/* curr_active_slave must be set before calling alb_swap_mac_addr */
@@ -1759,14 +1739,12 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
struct slave *swap_slave;
int res;
- if (!is_valid_ether_addr(sa->sa_data)) {
+ if (!is_valid_ether_addr(sa->sa_data))
return -EADDRNOTAVAIL;
- }
res = alb_set_mac_address(bond, addr);
- if (res) {
+ if (res)
return res;
- }
memcpy(bond_dev->dev_addr, sa->sa_data, bond_dev->addr_len);
@@ -1774,9 +1752,8 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
* Otherwise we'll need to pass the new address to it and handle
* duplications.
*/
- if (!bond->curr_active_slave) {
+ if (!bond->curr_active_slave)
return 0;
- }
swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);
@@ -1800,8 +1777,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
void bond_alb_clear_vlan(struct bonding *bond, unsigned short vlan_id)
{
- if (bond->alb_info.rlb_enabled) {
+ if (bond->alb_info.rlb_enabled)
rlb_clear_vlan(bond, vlan_id);
- }
}
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 5fc4c23..2d3f7fa 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -69,7 +69,7 @@ void bond_debug_register(struct bonding *bond)
debugfs_create_dir(bond->dev->name, bonding_debug_root);
if (!bond->debug_dir) {
- pr_warning("%s: Warning: failed to register to debugfs\n",
+ pr_warn("%s: Warning: failed to register to debugfs\n",
bond->dev->name);
return;
}
@@ -98,9 +98,8 @@ void bond_debug_reregister(struct bonding *bond)
if (d) {
bond->debug_dir = d;
} else {
- pr_warning("%s: Warning: failed to reregister, "
- "so just unregister old one\n",
- bond->dev->name);
+ pr_warn("%s: Warning: failed to reregister, so just unregister old one\n",
+ bond->dev->name);
bond_debug_unregister(bond);
}
}
@@ -110,8 +109,7 @@ void bond_create_debugfs(void)
bonding_debug_root = debugfs_create_dir("bonding", NULL);
if (!bonding_debug_root) {
- pr_warning("Warning: Cannot create bonding directory"
- " in debugfs\n");
+ pr_warn("Warning: Cannot create bonding directory in debugfs\n");
}
}
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index e5628fc..730d72c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -673,12 +673,12 @@ static void bond_do_fail_over_mac(struct bonding *bond,
write_unlock_bh(&bond->curr_slave_lock);
if (old_active) {
- memcpy(tmp_mac, new_active->dev->dev_addr, ETH_ALEN);
- memcpy(saddr.sa_data, old_active->dev->dev_addr,
- ETH_ALEN);
+ ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
+ ether_addr_copy(saddr.sa_data,
+ old_active->dev->dev_addr);
saddr.sa_family = new_active->dev->type;
} else {
- memcpy(saddr.sa_data, bond->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(saddr.sa_data, bond->dev->dev_addr);
saddr.sa_family = bond->dev->type;
}
@@ -692,7 +692,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
if (!old_active)
goto out;
- memcpy(saddr.sa_data, tmp_mac, ETH_ALEN);
+ ether_addr_copy(saddr.sa_data, tmp_mac);
saddr.sa_family = old_active->dev->type;
rv = dev_set_mac_address(old_active->dev, &saddr);
@@ -798,11 +798,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
return;
if (new_active) {
- new_active->jiffies = jiffies;
+ new_active->last_link_up = jiffies;
if (new_active->link == BOND_LINK_BACK) {
if (USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: making interface %s the new active one %d ms earlier.\n",
+ pr_info("%s: making interface %s the new active one %d ms earlier\n",
bond->dev->name, new_active->dev->name,
(bond->params.updelay - new_active->delay) * bond->params.miimon);
}
@@ -817,7 +817,7 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
} else {
if (USES_PRIMARY(bond->params.mode)) {
- pr_info("%s: making interface %s the new active one.\n",
+ pr_info("%s: making interface %s the new active one\n",
bond->dev->name, new_active->dev->name);
}
}
@@ -910,7 +910,7 @@ void bond_select_active_slave(struct bonding *bond)
pr_info("%s: first active interface up!\n",
bond->dev->name);
} else {
- pr_info("%s: now running without any active interface !\n",
+ pr_info("%s: now running without any active interface!\n",
bond->dev->name);
}
}
@@ -946,14 +946,6 @@ static inline void slave_disable_netpoll(struct slave *slave)
slave->np = NULL;
__netpoll_free_async(np);
}
-static inline bool slave_dev_support_netpoll(struct net_device *slave_dev)
-{
- if (slave_dev->priv_flags & IFF_DISABLE_NETPOLL)
- return false;
- if (!slave_dev->netdev_ops->ndo_poll_controller)
- return false;
- return true;
-}
static void bond_poll_controller(struct net_device *bond_dev)
{
@@ -1119,9 +1111,6 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
slave = bond_slave_get_rcu(skb->dev);
bond = slave->bond;
- if (bond->params.arp_interval)
- slave->dev->last_rx = jiffies;
-
recv_probe = ACCESS_ONCE(bond->recv_probe);
if (recv_probe) {
ret = recv_probe(skb, bond, slave);
@@ -1146,7 +1135,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb)
kfree_skb(skb);
return RX_HANDLER_CONSUMED;
}
- memcpy(eth_hdr(skb)->h_dest, bond->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(eth_hdr(skb)->h_dest, bond->dev->dev_addr);
}
return ret;
@@ -1187,13 +1176,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (!bond->params.use_carrier &&
slave_dev->ethtool_ops->get_link == NULL &&
slave_ops->ndo_do_ioctl == NULL) {
- pr_warning("%s: Warning: no link monitoring support for %s\n",
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: no link monitoring support for %s\n",
+ bond_dev->name, slave_dev->name);
}
/* already enslaved */
if (slave_dev->flags & IFF_SLAVE) {
- pr_debug("Error, Device was already enslaved\n");
+ pr_debug("Error: Device was already enslaved\n");
return -EBUSY;
}
@@ -1211,9 +1200,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev->name, slave_dev->name, bond_dev->name);
return -EPERM;
} else {
- pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
- bond_dev->name, slave_dev->name,
- slave_dev->name, bond_dev->name);
+ pr_warn("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
+ bond_dev->name, slave_dev->name,
+ slave_dev->name, bond_dev->name);
}
} else {
pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
@@ -1226,7 +1215,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* enslaving it; the old ifenslave will not.
*/
if ((slave_dev->flags & IFF_UP)) {
- pr_err("%s is up. This may be due to an out of date ifenslave.\n",
+ pr_err("%s is up - this may be due to an out of date ifenslave\n",
slave_dev->name);
res = -EPERM;
goto err_undo_flags;
@@ -1270,24 +1259,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_dev);
}
} else if (bond_dev->type != slave_dev->type) {
- pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
- slave_dev->name,
- slave_dev->type, bond_dev->type);
+ pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
+ slave_dev->name, slave_dev->type, bond_dev->type);
res = -EINVAL;
goto err_undo_flags;
}
if (slave_ops->ndo_set_mac_address == NULL) {
if (!bond_has_slaves(bond)) {
- pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
+ pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
bond_dev->name);
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
bond->params.fail_over_mac = BOND_FOM_ACTIVE;
- pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
+ pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
bond_dev->name);
}
} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
- pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
+ pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n",
bond_dev->name);
res = -EOPNOTSUPP;
goto err_undo_flags;
@@ -1326,7 +1314,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* that need it, and for restoring it upon release, and then
* set it to the master's address
*/
- memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(new_slave->perm_hwaddr, slave_dev->dev_addr);
if (!bond->params.fail_over_mac ||
bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
@@ -1410,10 +1398,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_update_speed_duplex(new_slave);
- new_slave->last_arp_rx = jiffies -
+ new_slave->last_rx = jiffies -
(msecs_to_jiffies(bond->params.arp_interval) + 1);
for (i = 0; i < BOND_MAX_ARP_TARGETS; i++)
- new_slave->target_last_arp_rx[i] = new_slave->last_arp_rx;
+ new_slave->target_last_arp_rx[i] = new_slave->last_rx;
if (bond->params.miimon && !bond->params.use_carrier) {
link_reporting = bond_check_dev_link(bond, slave_dev, 1);
@@ -1428,12 +1416,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
* supported); thus, we don't need to change
* the messages for netif_carrier.
*/
- pr_warning("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details.\n",
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
+ bond_dev->name, slave_dev->name);
} else if (link_reporting == -1) {
/* unable get link status using mii/ethtool */
- pr_warning("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface.\n",
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
+ bond_dev->name, slave_dev->name);
}
}
@@ -1457,10 +1445,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
}
if (new_slave->link != BOND_LINK_DOWN)
- new_slave->jiffies = jiffies;
+ new_slave->last_link_up = jiffies;
pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
- new_slave->link == BOND_LINK_DOWN ? "DOWN" :
- (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
+ new_slave->link == BOND_LINK_DOWN ? "DOWN" :
+ (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
/* if there is a primary slave, remember it */
@@ -1520,9 +1508,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
- pr_info("Error, %s: master_dev is using netpoll, "
- "but new slave device does not support netpoll.\n",
- bond_dev->name);
+ pr_info("Error, %s: master_dev is using netpoll, but new slave device does not support netpoll\n",
+ bond_dev->name);
res = -EBUSY;
goto err_detach;
}
@@ -1560,10 +1547,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
unblock_netpoll_tx();
}
- pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
+ pr_info("%s: Enslaving %s as %s interface with %s link\n",
bond_dev->name, slave_dev->name,
- bond_is_active_slave(new_slave) ? "n active" : " backup",
- new_slave->link != BOND_LINK_DOWN ? "n up" : " down");
+ bond_is_active_slave(new_slave) ? "an active" : "a backup",
+ new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
/* enslave is successful */
return 0;
@@ -1603,7 +1590,7 @@ err_restore_mac:
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
*/
- memcpy(addr.sa_data, new_slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(addr.sa_data, new_slave->perm_hwaddr);
addr.sa_family = slave_dev->type;
dev_set_mac_address(slave_dev, &addr);
}
@@ -1648,7 +1635,7 @@ static int __bond_release_one(struct net_device *bond_dev,
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
!netdev_has_upper_dev(slave_dev, bond_dev)) {
- pr_err("%s: Error: cannot release %s.\n",
+ pr_err("%s: Error: cannot release %s\n",
bond_dev->name, slave_dev->name);
return -EINVAL;
}
@@ -1679,7 +1666,7 @@ static int __bond_release_one(struct net_device *bond_dev,
write_unlock_bh(&bond->lock);
- pr_info("%s: releasing %s interface %s\n",
+ pr_info("%s: Releasing %s interface %s\n",
bond_dev->name,
bond_is_active_slave(slave) ? "active" : "backup",
slave_dev->name);
@@ -1692,10 +1679,10 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
- pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
- bond_dev->name, slave_dev->name,
- slave->perm_hwaddr,
- bond_dev->name, slave_dev->name);
+ pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
+ bond_dev->name, slave_dev->name,
+ slave->perm_hwaddr,
+ bond_dev->name, slave_dev->name);
}
if (bond->primary_slave == slave)
@@ -1736,10 +1723,10 @@ static int __bond_release_one(struct net_device *bond_dev,
eth_hw_addr_random(bond_dev);
if (vlan_uses_dev(bond_dev)) {
- pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
- bond_dev->name, bond_dev->name);
- pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
- bond_dev->name);
+ pr_warn("%s: Warning: clearing HW address of %s while it still has VLANs\n",
+ bond_dev->name, bond_dev->name);
+ pr_warn("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs\n",
+ bond_dev->name);
}
}
@@ -1755,7 +1742,7 @@ static int __bond_release_one(struct net_device *bond_dev,
bond_compute_features(bond);
if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
(old_features & NETIF_F_VLAN_CHALLENGED))
- pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
+ pr_info("%s: last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
bond_dev->name, slave_dev->name, bond_dev->name);
/* must do this from outside any spinlocks */
@@ -1790,7 +1777,7 @@ static int __bond_release_one(struct net_device *bond_dev,
if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
- memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
+ ether_addr_copy(addr.sa_data, slave->perm_hwaddr);
addr.sa_family = slave_dev->type;
dev_set_mac_address(slave_dev, &addr);
}
@@ -1823,7 +1810,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
ret = bond_release(bond_dev, slave_dev);
if (ret == 0 && !bond_has_slaves(bond)) {
bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
- pr_info("%s: destroying bond %s.\n",
+ pr_info("%s: Destroying bond %s\n",
bond_dev->name, bond_dev->name);
unregister_netdevice(bond_dev);
}
@@ -1837,9 +1824,7 @@ static int bond_info_query(struct net_device *bond_dev, struct ifbond *info)
info->bond_mode = bond->params.mode;
info->miimon = bond->params.miimon;
- read_lock(&bond->lock);
info->num_slaves = bond->slave_cnt;
- read_unlock(&bond->lock);
return 0;
}
@@ -1851,7 +1836,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
int i = 0, res = -ENODEV;
struct slave *slave;
- read_lock(&bond->lock);
bond_for_each_slave(bond, slave, iter) {
if (i++ == (int)info->slave_id) {
res = 0;
@@ -1862,7 +1846,6 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
break;
}
}
- read_unlock(&bond->lock);
return res;
}
@@ -1892,7 +1875,7 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->link = BOND_LINK_FAIL;
slave->delay = bond->params.downdelay;
if (slave->delay) {
- pr_info("%s: link status down for %sinterface %s, disabling it in %d ms.\n",
+ pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
bond->dev->name,
(bond->params.mode ==
BOND_MODE_ACTIVEBACKUP) ?
@@ -1908,8 +1891,8 @@ static int bond_miimon_inspect(struct bonding *bond)
* recovered before downdelay expired
*/
slave->link = BOND_LINK_UP;
- slave->jiffies = jiffies;
- pr_info("%s: link status up again after %d ms for interface %s.\n",
+ slave->last_link_up = jiffies;
+ pr_info("%s: link status up again after %d ms for interface %s\n",
bond->dev->name,
(bond->params.downdelay - slave->delay) *
bond->params.miimon,
@@ -1934,7 +1917,7 @@ static int bond_miimon_inspect(struct bonding *bond)
slave->delay = bond->params.updelay;
if (slave->delay) {
- pr_info("%s: link status up for interface %s, enabling it in %d ms.\n",
+ pr_info("%s: link status up for interface %s, enabling it in %d ms\n",
bond->dev->name, slave->dev->name,
ignore_updelay ? 0 :
bond->params.updelay *
@@ -1944,7 +1927,7 @@ static int bond_miimon_inspect(struct bonding *bond)
case BOND_LINK_BACK:
if (!link_state) {
slave->link = BOND_LINK_DOWN;
- pr_info("%s: link status down again after %d ms for interface %s.\n",
+ pr_info("%s: link status down again after %d ms for interface %s\n",
bond->dev->name,
(bond->params.updelay - slave->delay) *
bond->params.miimon,
@@ -1983,7 +1966,7 @@ static void bond_miimon_commit(struct bonding *bond)
case BOND_LINK_UP:
slave->link = BOND_LINK_UP;
- slave->jiffies = jiffies;
+ slave->last_link_up = jiffies;
if (bond->params.mode == BOND_MODE_8023AD) {
/* prevent it from being the active one */
@@ -1996,7 +1979,7 @@ static void bond_miimon_commit(struct bonding *bond)
bond_set_backup_slave(slave);
}
- pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex.\n",
+ pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex\n",
bond->dev->name, slave->dev->name,
slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
slave->duplex ? "full" : "half");
@@ -2145,8 +2128,8 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op, __be32 dest_
{
struct sk_buff *skb;
- pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n", arp_op,
- slave_dev->name, &dest_ip, &src_ip, vlan_id);
+ pr_debug("arp %d on slave %s: dst %pI4 src %pI4 vid %d\n",
+ arp_op, slave_dev->name, &dest_ip, &src_ip, vlan_id);
skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
NULL, slave_dev->dev_addr, NULL);
@@ -2181,8 +2164,13 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
RTO_ONLINK, 0);
if (IS_ERR(rt)) {
- pr_debug("%s: no route to arp_ip_target %pI4\n",
- bond->dev->name, &targets[i]);
+ /* there's no route to target - try to send arp
+ * probe to generate any traffic (arp_validate=0)
+ */
+ if (bond->params.arp_validate && net_ratelimit())
+ pr_warn("%s: no route to arp_ip_target %pI4 and arp_validate is set\n",
+ bond->dev->name, &targets[i]);
+ bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i], 0, 0);
continue;
}
@@ -2260,7 +2248,7 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
pr_debug("bva: sip %pI4 not found in targets\n", &sip);
return;
}
- slave->last_arp_rx = jiffies;
+ slave->last_rx = jiffies;
slave->target_last_arp_rx[i] = jiffies;
}
@@ -2268,17 +2256,19 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
+ struct slave *curr_active_slave;
unsigned char *arp_ptr;
__be32 sip, tip;
- int alen;
+ int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
- if (skb->protocol != __cpu_to_be16(ETH_P_ARP))
+ if (!slave_do_arp_validate(bond, slave)) {
+ if ((slave_do_arp_validate_only(bond, slave) && is_arp) ||
+ !slave_do_arp_validate_only(bond, slave))
+ slave->last_rx = jiffies;
return RX_HANDLER_ANOTHER;
-
- read_lock(&bond->lock);
-
- if (!slave_do_arp_validate(bond, slave))
- goto out_unlock;
+ } else if (!is_arp) {
+ return RX_HANDLER_ANOTHER;
+ }
alen = arp_hdr_len(bond->dev);
@@ -2312,6 +2302,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
bond->params.arp_validate, slave_do_arp_validate(bond, slave),
&sip, &tip);
+ curr_active_slave = rcu_dereference(bond->curr_active_slave);
+
/*
* Backup slaves won't see the ARP reply, but do come through
* here for each ARP probe (so we swap the sip/tip to validate
@@ -2325,15 +2317,15 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
* is done to avoid endless looping when we can't reach the
* arp_ip_target and fool ourselves with our own arp requests.
*/
+
if (bond_is_active_slave(slave))
bond_validate_arp(bond, slave, sip, tip);
- else if (bond->curr_active_slave &&
- time_after(slave_last_rx(bond, bond->curr_active_slave),
- bond->curr_active_slave->jiffies))
+ else if (curr_active_slave &&
+ time_after(slave_last_rx(bond, curr_active_slave),
+ curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
out_unlock:
- read_unlock(&bond->lock);
if (arp != (struct arphdr *)skb->data)
kfree(arp);
return RX_HANDLER_ANOTHER;
@@ -2376,9 +2368,9 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
/* see if any of the previous devices are up now (i.e. they have
* xmt and rcv traffic). the curr_active_slave does not come into
- * the picture unless it is null. also, slave->jiffies is not needed
- * here because we send an arp on each slave and give a slave as
- * long as it needs to get the tx/rx within the delta.
+ * the picture unless it is null. also, slave->last_link_up is not
+ * needed here because we send an arp on each slave and give a slave
+ * as long as it needs to get the tx/rx within the delta.
* TODO: what about up/down delay in arp mode? it wasn't here before
* so it can wait
*/
@@ -2387,7 +2379,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (slave->link != BOND_LINK_UP) {
if (bond_time_in_interval(bond, trans_start, 1) &&
- bond_time_in_interval(bond, slave->dev->last_rx, 1)) {
+ bond_time_in_interval(bond, slave->last_rx, 1)) {
slave->link = BOND_LINK_UP;
slave_state_changed = 1;
@@ -2398,7 +2390,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
* is closed.
*/
if (!oldcurrent) {
- pr_info("%s: link status definitely up for interface %s, ",
+ pr_info("%s: link status definitely up for interface %s\n",
bond->dev->name,
slave->dev->name);
do_failover = 1;
@@ -2416,7 +2408,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
* if we don't know our ip yet
*/
if (!bond_time_in_interval(bond, trans_start, 2) ||
- !bond_time_in_interval(bond, slave->dev->last_rx, 2)) {
+ !bond_time_in_interval(bond, slave->last_rx, 2)) {
slave->link = BOND_LINK_DOWN;
slave_state_changed = 1;
@@ -2424,9 +2416,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- pr_info("%s: interface %s is now down.\n",
- bond->dev->name,
- slave->dev->name);
+ pr_info("%s: interface %s is now down\n",
+ bond->dev->name, slave->dev->name);
if (slave == oldcurrent)
do_failover = 1;
@@ -2505,7 +2496,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
* active. This avoids bouncing, as the last receive
* times need a full ARP monitor cycle to be updated.
*/
- if (bond_time_in_interval(bond, slave->jiffies, 2))
+ if (bond_time_in_interval(bond, slave->last_link_up, 2))
continue;
/*
@@ -2576,7 +2567,7 @@ static void bond_ab_arp_commit(struct bonding *bond)
bond->current_arp_slave = NULL;
}
- pr_info("%s: link status definitely up for interface %s.\n",
+ pr_info("%s: link status definitely up for interface %s\n",
bond->dev->name, slave->dev->name);
if (!bond->curr_active_slave ||
@@ -2682,7 +2673,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
bond_set_slave_inactive_flags(slave,
BOND_SLAVE_NOTIFY_LATER);
- pr_info("%s: backup interface %s is now down.\n",
+ pr_info("%s: backup interface %s is now down\n",
bond->dev->name, slave->dev->name);
}
if (slave == curr_arp_slave)
@@ -2698,7 +2689,7 @@ static bool bond_ab_arp_probe(struct bonding *bond)
new_slave->link = BOND_LINK_BACK;
bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
bond_arp_send_all(bond, new_slave);
- new_slave->jiffies = jiffies;
+ new_slave->last_link_up = jiffies;
rcu_assign_pointer(bond->current_arp_slave, new_slave);
check_state:
@@ -2879,9 +2870,9 @@ static int bond_slave_netdev_event(unsigned long event,
break;
}
- pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
- bond->dev->name, bond->primary_slave ? slave_dev->name :
- "none");
+ pr_info("%s: Primary slave changed to %s, reselecting active slave\n",
+ bond->dev->name,
+ bond->primary_slave ? slave_dev->name : "none");
block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
@@ -2917,8 +2908,7 @@ static int bond_netdev_event(struct notifier_block *this,
struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
pr_debug("event_dev: %s, event: %lx\n",
- event_dev ? event_dev->name : "None",
- event);
+ event_dev ? event_dev->name : "None", event);
if (!(event_dev->priv_flags & IFF_BONDING))
return NOTIFY_DONE;
@@ -3087,8 +3077,7 @@ static int bond_open(struct net_device *bond_dev)
if (bond->params.arp_interval) { /* arp interval, in milliseconds. */
queue_delayed_work(bond->wq, &bond->arp_work, 0);
- if (bond->params.arp_validate)
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_arp_rcv;
}
if (bond->params.mode == BOND_MODE_8023AD) {
@@ -3375,8 +3364,8 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
struct list_head *iter;
int res = 0;
- pr_debug("bond=%p, name=%s, new_mtu=%d\n", bond,
- (bond_dev ? bond_dev->name : "None"), new_mtu);
+ pr_debug("bond=%p, name=%s, new_mtu=%d\n",
+ bond, bond_dev ? bond_dev->name : "None", new_mtu);
/* Can't hold bond->lock with bh disabled here since
* some base drivers panic. On the other hand we can't
@@ -3395,8 +3384,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
bond_for_each_slave(bond, slave, iter) {
pr_debug("s %p c_m %p\n",
- slave,
- slave->dev->netdev_ops->ndo_change_mtu);
+ slave, slave->dev->netdev_ops->ndo_change_mtu);
res = dev_set_mtu(slave->dev, new_mtu);
@@ -3484,15 +3472,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
*/
bond_for_each_slave(bond, slave, iter) {
- const struct net_device_ops *slave_ops = slave->dev->netdev_ops;
pr_debug("slave %p %s\n", slave, slave->dev->name);
-
- if (slave_ops->ndo_set_mac_address == NULL) {
- res = -EOPNOTSUPP;
- pr_debug("EOPNOTSUPP %s\n", slave->dev->name);
- goto unwind;
- }
-
res = dev_set_mac_address(slave->dev, addr);
if (res) {
/* TODO: consider downing the slave
@@ -3958,7 +3938,7 @@ static void bond_uninit(struct net_device *bond_dev)
/* Release the bonded slaves */
bond_for_each_slave(bond, slave, iter)
__bond_release_one(bond_dev, slave->dev, true);
- pr_info("%s: released all slaves\n", bond_dev->name);
+ pr_info("%s: Released all slaves\n", bond_dev->name);
list_del(&bond->bond_list);
@@ -4036,7 +4016,7 @@ static int bond_check_params(struct bond_params *params)
if ((bond_mode != BOND_MODE_XOR) &&
(bond_mode != BOND_MODE_8023AD)) {
pr_info("xmit_hash_policy param is irrelevant in mode %s\n",
- bond_mode_name(bond_mode));
+ bond_mode_name(bond_mode));
} else {
bond_opt_initstr(&newval, xmit_hash_policy);
valptr = bond_opt_parse(bond_opt_get(BOND_OPT_XMIT_HASH),
@@ -4077,74 +4057,71 @@ static int bond_check_params(struct bond_params *params)
}
params->ad_select = valptr->value;
if (bond_mode != BOND_MODE_8023AD)
- pr_warning("ad_select param only affects 802.3ad mode\n");
+ pr_warn("ad_select param only affects 802.3ad mode\n");
} else {
params->ad_select = BOND_AD_STABLE;
}
if (max_bonds < 0) {
- pr_warning("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
- max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
+ pr_warn("Warning: max_bonds (%d) not in range %d-%d, so it was reset to BOND_DEFAULT_MAX_BONDS (%d)\n",
+ max_bonds, 0, INT_MAX, BOND_DEFAULT_MAX_BONDS);
max_bonds = BOND_DEFAULT_MAX_BONDS;
}
if (miimon < 0) {
- pr_warning("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
- miimon, INT_MAX);
+ pr_warn("Warning: miimon module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ miimon, INT_MAX);
miimon = 0;
}
if (updelay < 0) {
- pr_warning("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
- updelay, INT_MAX);
+ pr_warn("Warning: updelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ updelay, INT_MAX);
updelay = 0;
}
if (downdelay < 0) {
- pr_warning("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
- downdelay, INT_MAX);
+ pr_warn("Warning: downdelay module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ downdelay, INT_MAX);
downdelay = 0;
}
if ((use_carrier != 0) && (use_carrier != 1)) {
- pr_warning("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
- use_carrier);
+ pr_warn("Warning: use_carrier module parameter (%d), not of valid value (0/1), so it was set to 1\n",
+ use_carrier);
use_carrier = 1;
}
if (num_peer_notif < 0 || num_peer_notif > 255) {
- pr_warning("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
- num_peer_notif);
+ pr_warn("Warning: num_grat_arp/num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
+ num_peer_notif);
num_peer_notif = 1;
}
/* reset values for 802.3ad/TLB/ALB */
if (BOND_NO_USES_ARP(bond_mode)) {
if (!miimon) {
- pr_warning("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
- pr_warning("Forcing miimon to 100msec\n");
+ pr_warn("Warning: miimon must be specified, otherwise bonding will not detect link failure, speed and duplex which are essential for 802.3ad operation\n");
+ pr_warn("Forcing miimon to 100msec\n");
miimon = BOND_DEFAULT_MIIMON;
}
}
if (tx_queues < 1 || tx_queues > 255) {
- pr_warning("Warning: tx_queues (%d) should be between "
- "1 and 255, resetting to %d\n",
- tx_queues, BOND_DEFAULT_TX_QUEUES);
+ pr_warn("Warning: tx_queues (%d) should be between 1 and 255, resetting to %d\n",
+ tx_queues, BOND_DEFAULT_TX_QUEUES);
tx_queues = BOND_DEFAULT_TX_QUEUES;
}
if ((all_slaves_active != 0) && (all_slaves_active != 1)) {
- pr_warning("Warning: all_slaves_active module parameter (%d), "
- "not of valid value (0/1), so it was set to "
- "0\n", all_slaves_active);
+ pr_warn("Warning: all_slaves_active module parameter (%d), not of valid value (0/1), so it was set to 0\n",
+ all_slaves_active);
all_slaves_active = 0;
}
if (resend_igmp < 0 || resend_igmp > 255) {
- pr_warning("Warning: resend_igmp (%d) should be between "
- "0 and 255, resetting to %d\n",
- resend_igmp, BOND_DEFAULT_RESEND_IGMP);
+ pr_warn("Warning: resend_igmp (%d) should be between 0 and 255, resetting to %d\n",
+ resend_igmp, BOND_DEFAULT_RESEND_IGMP);
resend_igmp = BOND_DEFAULT_RESEND_IGMP;
}
@@ -4165,37 +4142,36 @@ static int bond_check_params(struct bond_params *params)
/* just warn the user the up/down delay will have
* no effect since miimon is zero...
*/
- pr_warning("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
- updelay, downdelay);
+ pr_warn("Warning: miimon module parameter not set and updelay (%d) or downdelay (%d) module parameter is set; updelay and downdelay have no effect unless miimon is set\n",
+ updelay, downdelay);
}
} else {
/* don't allow arp monitoring */
if (arp_interval) {
- pr_warning("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
- miimon, arp_interval);
+ pr_warn("Warning: miimon (%d) and arp_interval (%d) can't be used simultaneously, disabling ARP monitoring\n",
+ miimon, arp_interval);
arp_interval = 0;
}
if ((updelay % miimon) != 0) {
- pr_warning("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
- updelay, miimon,
- (updelay / miimon) * miimon);
+ pr_warn("Warning: updelay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+ updelay, miimon, (updelay / miimon) * miimon);
}
updelay /= miimon;
if ((downdelay % miimon) != 0) {
- pr_warning("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
- downdelay, miimon,
- (downdelay / miimon) * miimon);
+ pr_warn("Warning: downdelay (%d) is not a multiple of miimon (%d), downdelay rounded to %d ms\n",
+ downdelay, miimon,
+ (downdelay / miimon) * miimon);
}
downdelay /= miimon;
}
if (arp_interval < 0) {
- pr_warning("Warning: arp_interval module parameter (%d) , not in range 0-%d, so it was reset to 0\n",
- arp_interval, INT_MAX);
+ pr_warn("Warning: arp_interval module parameter (%d), not in range 0-%d, so it was reset to 0\n",
+ arp_interval, INT_MAX);
arp_interval = 0;
}
@@ -4206,30 +4182,26 @@ static int bond_check_params(struct bond_params *params)
__be32 ip;
if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
- pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
- arp_ip_target[i]);
+ pr_warn("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
+ arp_ip_target[i]);
arp_interval = 0;
} else {
if (bond_get_targets_ip(arp_target, ip) == -1)
arp_target[arp_ip_count++] = ip;
else
- pr_warning("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
- &ip);
+ pr_warn("Warning: duplicate address %pI4 in arp_ip_target, skipping\n",
+ &ip);
}
}
if (arp_interval && !arp_ip_count) {
/* don't allow arping if no arp_ip_target given... */
- pr_warning("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
- arp_interval);
+ pr_warn("Warning: arp_interval module parameter (%d) specified without providing an arp_ip_target parameter, arp_interval was reset to 0\n",
+ arp_interval);
arp_interval = 0;
}
if (arp_validate) {
- if (bond_mode != BOND_MODE_ACTIVEBACKUP) {
- pr_err("arp_validate only supported in active-backup mode\n");
- return -EINVAL;
- }
if (!arp_interval) {
pr_err("arp_validate requires arp_interval\n");
return -EINVAL;
@@ -4271,23 +4243,23 @@ static int bond_check_params(struct bond_params *params)
arp_interval, valptr->string, arp_ip_count);
for (i = 0; i < arp_ip_count; i++)
- pr_info(" %s", arp_ip_target[i]);
+ pr_cont(" %s", arp_ip_target[i]);
- pr_info("\n");
+ pr_cont("\n");
} else if (max_bonds) {
/* miimon and arp_interval not set, we need one so things
* work as expected, see bonding.txt for details
*/
- pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details.\n");
+ pr_debug("Warning: either miimon or arp_interval and arp_ip_target module parameters must be specified, otherwise bonding will not detect link failures! see bonding.txt for details\n");
}
if (primary && !USES_PRIMARY(bond_mode)) {
/* currently, using a primary only makes sense
* in active backup, TLB or ALB modes
*/
- pr_warning("Warning: %s primary device specified but has no effect in %s mode\n",
- primary, bond_mode_name(bond_mode));
+ pr_warn("Warning: %s primary device specified but has no effect in %s mode\n",
+ primary, bond_mode_name(bond_mode));
primary = NULL;
}
@@ -4316,14 +4288,14 @@ static int bond_check_params(struct bond_params *params)
}
fail_over_mac_value = valptr->value;
if (bond_mode != BOND_MODE_ACTIVEBACKUP)
- pr_warning("Warning: fail_over_mac only affects active-backup mode.\n");
+ pr_warn("Warning: fail_over_mac only affects active-backup mode\n");
} else {
fail_over_mac_value = BOND_FOM_NONE;
}
if (lp_interval == 0) {
- pr_warning("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
- INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
+ pr_warn("Warning: ip_interval must be between 1 and %d, so it was reset to %d\n",
+ INT_MAX, BOND_ALB_DEFAULT_LP_INTERVAL);
lp_interval = BOND_ALB_DEFAULT_LP_INTERVAL;
}
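
Editorial sketch (not part of the patch): the bond_arp_rcv() rework above boils down to a small piece of bitmask logic. With arp_validate off, every received frame refreshes slave->last_rx; with one of the new filter modes, only ARP frames do; and when validation applies to the slave's role, the update is left to the validation path. The standalone C model below illustrates that decision; the constants and helper names are simplified stand-ins rather than the kernel's own.

/* Hedged model of the new bond_arp_rcv() bookkeeping; illustration only. */
#include <stdbool.h>
#include <stdio.h>

#define VALIDATE_ACTIVE	0x1	/* stand-in for BOND_ARP_VALIDATE_ACTIVE */
#define VALIDATE_BACKUP	0x2	/* stand-in for BOND_ARP_VALIDATE_BACKUP */
#define ARP_FILTER	0x4	/* stand-in for BOND_ARP_FILTER          */

/* does validation apply to this slave's role? (cf. slave_do_arp_validate) */
static bool do_validate(int arp_validate, bool slave_is_active)
{
	return arp_validate & (slave_is_active ? VALIDATE_ACTIVE : VALIDATE_BACKUP);
}

/* should this frame refresh slave->last_rx before any ARP parsing? */
static bool refresh_last_rx(int arp_validate, bool slave_is_active, bool is_arp)
{
	if (do_validate(arp_validate, slave_is_active))
		return false;	/* validation path updates last_rx itself */
	if (arp_validate & ARP_FILTER)
		return is_arp;	/* filter modes: only ARP traffic counts  */
	return true;		/* no validation: any frame counts        */
}

int main(void)
{
	/* arp_validate=filter on a backup slave: TCP frame no, ARP frame yes */
	printf("%d %d\n",
	       refresh_last_rx(ARP_FILTER, false, false),
	       refresh_last_rx(ARP_FILTER, false, true));
	return 0;
}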
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index 70651f8..20659b1 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -181,7 +181,7 @@ static int bond_changelink(struct net_device *bond_dev,
int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
if (arp_interval && miimon) {
- pr_err("%s: ARP monitoring cannot be used with MII monitoring.\n",
+ pr_err("%s: ARP monitoring cannot be used with MII monitoring\n",
bond->dev->name);
return -EINVAL;
}
@@ -207,7 +207,7 @@ static int bond_changelink(struct net_device *bond_dev,
i++;
}
if (i == 0 && bond->params.arp_interval)
- pr_warn("%s: removing last arp target with arp_interval on\n",
+ pr_warn("%s: Removing last arp target with arp_interval on\n",
bond->dev->name);
if (err)
return err;
@@ -216,7 +216,7 @@ static int bond_changelink(struct net_device *bond_dev,
int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
if (arp_validate && miimon) {
- pr_err("%s: ARP validating cannot be used with MII monitoring.\n",
+ pr_err("%s: ARP validating cannot be used with MII monitoring\n",
bond->dev->name);
return -EINVAL;
}
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index c378784..23f3655 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -47,11 +47,14 @@ static struct bond_opt_value bond_xmit_hashtype_tbl[] = {
};
static struct bond_opt_value bond_arp_validate_tbl[] = {
- { "none", BOND_ARP_VALIDATE_NONE, BOND_VALFLAG_DEFAULT},
- { "active", BOND_ARP_VALIDATE_ACTIVE, 0},
- { "backup", BOND_ARP_VALIDATE_BACKUP, 0},
- { "all", BOND_ARP_VALIDATE_ALL, 0},
- { NULL, -1, 0},
+ { "none", BOND_ARP_VALIDATE_NONE, BOND_VALFLAG_DEFAULT},
+ { "active", BOND_ARP_VALIDATE_ACTIVE, 0},
+ { "backup", BOND_ARP_VALIDATE_BACKUP, 0},
+ { "all", BOND_ARP_VALIDATE_ALL, 0},
+ { "filter", BOND_ARP_FILTER, 0},
+ { "filter_active", BOND_ARP_FILTER_ACTIVE, 0},
+ { "filter_backup", BOND_ARP_FILTER_BACKUP, 0},
+ { NULL, -1, 0},
};
static struct bond_opt_value bond_arp_all_targets_tbl[] = {
@@ -151,7 +154,8 @@ static struct bond_option bond_opts[] = {
.id = BOND_OPT_ARP_VALIDATE,
.name = "arp_validate",
.desc = "validate src/dst of ARP probes",
- .unsuppmodes = BOND_MODE_ALL_EX(BIT(BOND_MODE_ACTIVEBACKUP)),
+ .unsuppmodes = BIT(BOND_MODE_8023AD) | BIT(BOND_MODE_TLB) |
+ BIT(BOND_MODE_ALB),
.values = bond_arp_validate_tbl,
.set = bond_option_arp_validate_set
},
@@ -473,10 +477,10 @@ static void bond_opt_error_interpret(struct bonding *bond,
p = strchr(val->string, '\n');
if (p)
*p = '\0';
- pr_err("%s: option %s: invalid value (%s).\n",
+ pr_err("%s: option %s: invalid value (%s)\n",
bond->dev->name, opt->name, val->string);
} else {
- pr_err("%s: option %s: invalid value (%llu).\n",
+ pr_err("%s: option %s: invalid value (%llu)\n",
bond->dev->name, opt->name, val->value);
}
}
@@ -484,7 +488,7 @@ static void bond_opt_error_interpret(struct bonding *bond,
maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
if (!maxval)
break;
- pr_err("%s: option %s: allowed values %llu - %llu.\n",
+ pr_err("%s: option %s: allowed values %llu - %llu\n",
bond->dev->name, opt->name, minval ? minval->value : 0,
maxval->value);
break;
@@ -492,11 +496,11 @@ static void bond_opt_error_interpret(struct bonding *bond,
bond_opt_dep_print(bond, opt);
break;
case -ENOTEMPTY:
- pr_err("%s: option %s: unable to set because the bond device has slaves.\n",
+ pr_err("%s: option %s: unable to set because the bond device has slaves\n",
bond->dev->name, opt->name);
break;
case -EBUSY:
- pr_err("%s: option %s: unable to set because the bond device is up.\n",
+ pr_err("%s: option %s: unable to set because the bond device is up\n",
bond->dev->name, opt->name);
break;
default:
@@ -589,7 +593,7 @@ int bond_option_mode_set(struct bonding *bond, struct bond_opt_value *newval)
bond->params.arp_interval = 0;
/* set miimon to default value */
bond->params.miimon = BOND_DEFAULT_MIIMON;
- pr_info("%s: Setting MII monitoring interval to %d.\n",
+ pr_info("%s: Setting MII monitoring interval to %d\n",
bond->dev->name, bond->params.miimon);
}
@@ -636,13 +640,13 @@ int bond_option_active_slave_set(struct bonding *bond,
if (slave_dev) {
if (!netif_is_bond_slave(slave_dev)) {
- pr_err("Device %s is not bonding slave.\n",
+ pr_err("Device %s is not bonding slave\n",
slave_dev->name);
return -EINVAL;
}
if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
- pr_err("%s: Device %s is not our slave.\n",
+ pr_err("%s: Device %s is not our slave\n",
bond->dev->name, slave_dev->name);
return -EINVAL;
}
@@ -653,8 +657,7 @@ int bond_option_active_slave_set(struct bonding *bond,
/* check to see if we are clearing active */
if (!slave_dev) {
- pr_info("%s: Clearing current active slave.\n",
- bond->dev->name);
+ pr_info("%s: Clearing current active slave\n", bond->dev->name);
rcu_assign_pointer(bond->curr_active_slave, NULL);
bond_select_active_slave(bond);
} else {
@@ -665,16 +668,16 @@ int bond_option_active_slave_set(struct bonding *bond,
if (new_active == old_active) {
/* do nothing */
- pr_info("%s: %s is already the current active slave.\n",
+ pr_info("%s: %s is already the current active slave\n",
bond->dev->name, new_active->dev->name);
} else {
if (old_active && (new_active->link == BOND_LINK_UP) &&
IS_UP(new_active->dev)) {
- pr_info("%s: Setting %s as active slave.\n",
+ pr_info("%s: Setting %s as active slave\n",
bond->dev->name, new_active->dev->name);
bond_change_active_slave(bond, new_active);
} else {
- pr_err("%s: Could not set %s as active slave; either %s is down or the link is down.\n",
+ pr_err("%s: Could not set %s as active slave; either %s is down or the link is down\n",
bond->dev->name, new_active->dev->name,
new_active->dev->name);
ret = -EINVAL;
@@ -690,19 +693,19 @@ int bond_option_active_slave_set(struct bonding *bond,
int bond_option_miimon_set(struct bonding *bond, struct bond_opt_value *newval)
{
- pr_info("%s: Setting MII monitoring interval to %llu.\n",
+ pr_info("%s: Setting MII monitoring interval to %llu\n",
bond->dev->name, newval->value);
bond->params.miimon = newval->value;
if (bond->params.updelay)
- pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value\n",
bond->dev->name,
bond->params.updelay * bond->params.miimon);
if (bond->params.downdelay)
- pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
bond->dev->name,
bond->params.downdelay * bond->params.miimon);
if (newval->value && bond->params.arp_interval) {
- pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+ pr_info("%s: MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n",
bond->dev->name);
bond->params.arp_interval = 0;
if (bond->params.arp_validate)
@@ -742,9 +745,8 @@ int bond_option_updelay_set(struct bonding *bond, struct bond_opt_value *newval)
bond->params.miimon);
}
bond->params.updelay = value / bond->params.miimon;
- pr_info("%s: Setting up delay to %d.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
+ pr_info("%s: Setting up delay to %d\n",
+ bond->dev->name, bond->params.updelay * bond->params.miimon);
return 0;
}
@@ -767,9 +769,8 @@ int bond_option_downdelay_set(struct bonding *bond,
bond->params.miimon);
}
bond->params.downdelay = value / bond->params.miimon;
- pr_info("%s: Setting down delay to %d.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
+ pr_info("%s: Setting down delay to %d\n",
+ bond->dev->name, bond->params.downdelay * bond->params.miimon);
return 0;
}
@@ -777,7 +778,7 @@ int bond_option_downdelay_set(struct bonding *bond,
int bond_option_use_carrier_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: Setting use_carrier to %llu.\n",
+ pr_info("%s: Setting use_carrier to %llu\n",
bond->dev->name, newval->value);
bond->params.use_carrier = newval->value;
@@ -787,17 +788,17 @@ int bond_option_use_carrier_set(struct bonding *bond,
int bond_option_arp_interval_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: Setting ARP monitoring interval to %llu.\n",
+ pr_info("%s: Setting ARP monitoring interval to %llu\n",
bond->dev->name, newval->value);
bond->params.arp_interval = newval->value;
if (newval->value) {
if (bond->params.miimon) {
- pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring\n",
bond->dev->name, bond->dev->name);
bond->params.miimon = 0;
}
if (!bond->params.arp_targets[0])
- pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified\n",
bond->dev->name);
}
if (bond->dev->flags & IFF_UP) {
@@ -812,8 +813,7 @@ int bond_option_arp_interval_set(struct bonding *bond,
cancel_delayed_work_sync(&bond->arp_work);
} else {
/* arp_validate can be set only in active-backup mode */
- if (bond->params.arp_validate)
- bond->recv_probe = bond_arp_rcv;
+ bond->recv_probe = bond_arp_rcv;
cancel_delayed_work_sync(&bond->mii_work);
queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
@@ -856,12 +856,11 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
ind = bond_get_targets_ip(targets, 0); /* first free slot */
if (ind == -1) {
- pr_err("%s: ARP target table is full!\n",
- bond->dev->name);
+ pr_err("%s: ARP target table is full!\n", bond->dev->name);
return -EINVAL;
}
- pr_info("%s: adding ARP target %pI4.\n", bond->dev->name, &target);
+ pr_info("%s: Adding ARP target %pI4\n", bond->dev->name, &target);
_bond_options_arp_ip_target_set(bond, ind, target, jiffies);
@@ -896,17 +895,16 @@ int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
ind = bond_get_targets_ip(targets, target);
if (ind == -1) {
- pr_err("%s: unable to remove nonexistent ARP target %pI4.\n",
+ pr_err("%s: unable to remove nonexistent ARP target %pI4\n",
bond->dev->name, &target);
return -EINVAL;
}
if (ind == 0 && !targets[1] && bond->params.arp_interval)
- pr_warn("%s: removing last arp target with arp_interval on\n",
+ pr_warn("%s: Removing last arp target with arp_interval on\n",
bond->dev->name);
- pr_info("%s: removing ARP target %pI4.\n", bond->dev->name,
- &target);
+ pr_info("%s: Removing ARP target %pI4\n", bond->dev->name, &target);
/* not to race with bond_arp_rcv */
write_lock_bh(&bond->lock);
@@ -954,7 +952,7 @@ int bond_option_arp_ip_targets_set(struct bonding *bond,
else if (newval->string[0] == '-')
ret = bond_option_arp_ip_target_rem(bond, target);
else
- pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n",
+ pr_err("no command found in arp_ip_targets file for bond %s - use +<addr> or -<addr>\n",
bond->dev->name);
} else {
target = newval->value;
@@ -967,7 +965,7 @@ int bond_option_arp_ip_targets_set(struct bonding *bond,
int bond_option_arp_validate_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: setting arp_validate to %s (%llu).\n",
+ pr_info("%s: Setting arp_validate to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
if (bond->dev->flags & IFF_UP) {
@@ -984,7 +982,7 @@ int bond_option_arp_validate_set(struct bonding *bond,
int bond_option_arp_all_targets_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: setting arp_all_targets to %s (%llu).\n",
+ pr_info("%s: Setting arp_all_targets to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.arp_all_targets = newval->value;
@@ -1006,8 +1004,7 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
*p = '\0';
/* check to see if we are clearing primary */
if (!strlen(primary)) {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
+ pr_info("%s: Setting primary slave to None\n", bond->dev->name);
bond->primary_slave = NULL;
memset(bond->params.primary, 0, sizeof(bond->params.primary));
bond_select_active_slave(bond);
@@ -1016,7 +1013,7 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
bond_for_each_slave(bond, slave, iter) {
if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
- pr_info("%s: Setting %s as primary slave.\n",
+ pr_info("%s: Setting %s as primary slave\n",
bond->dev->name, slave->dev->name);
bond->primary_slave = slave;
strcpy(bond->params.primary, slave->dev->name);
@@ -1026,15 +1023,14 @@ int bond_option_primary_set(struct bonding *bond, struct bond_opt_value *newval)
}
if (bond->primary_slave) {
- pr_info("%s: Setting primary slave to None.\n",
- bond->dev->name);
+ pr_info("%s: Setting primary slave to None\n", bond->dev->name);
bond->primary_slave = NULL;
bond_select_active_slave(bond);
}
strncpy(bond->params.primary, primary, IFNAMSIZ);
bond->params.primary[IFNAMSIZ - 1] = 0;
- pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet.\n",
+ pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet\n",
bond->dev->name, primary, bond->dev->name);
out:
@@ -1048,7 +1044,7 @@ out:
int bond_option_primary_reselect_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: setting primary_reselect to %s (%llu).\n",
+ pr_info("%s: Setting primary_reselect to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.primary_reselect = newval->value;
@@ -1064,7 +1060,7 @@ int bond_option_primary_reselect_set(struct bonding *bond,
int bond_option_fail_over_mac_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: Setting fail_over_mac to %s (%llu).\n",
+ pr_info("%s: Setting fail_over_mac to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.fail_over_mac = newval->value;
@@ -1074,7 +1070,7 @@ int bond_option_fail_over_mac_set(struct bonding *bond,
int bond_option_xmit_hash_policy_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: setting xmit hash policy to %s (%llu).\n",
+ pr_info("%s: Setting xmit hash policy to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.xmit_policy = newval->value;
@@ -1084,7 +1080,7 @@ int bond_option_xmit_hash_policy_set(struct bonding *bond,
int bond_option_resend_igmp_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: Setting resend_igmp to %llu.\n",
+ pr_info("%s: Setting resend_igmp to %llu\n",
bond->dev->name, newval->value);
bond->params.resend_igmp = newval->value;
@@ -1158,7 +1154,7 @@ int bond_option_pps_set(struct bonding *bond, struct bond_opt_value *newval)
int bond_option_lacp_rate_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: Setting LACP rate to %s (%llu).\n",
+ pr_info("%s: Setting LACP rate to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.lacp_fast = newval->value;
bond_3ad_update_lacp_rate(bond);
@@ -1169,7 +1165,7 @@ int bond_option_lacp_rate_set(struct bonding *bond,
int bond_option_ad_select_set(struct bonding *bond,
struct bond_opt_value *newval)
{
- pr_info("%s: Setting ad_select to %s (%llu).\n",
+ pr_info("%s: Setting ad_select to %s (%llu)\n",
bond->dev->name, newval->string, newval->value);
bond->params.ad_select = newval->value;
@@ -1199,8 +1195,7 @@ int bond_option_queue_id_set(struct bonding *bond,
goto err_no_cmd;
/* Check buffer length, valid ifname and queue id */
- if (strlen(newval->string) > IFNAMSIZ ||
- !dev_valid_name(newval->string) ||
+ if (!dev_valid_name(newval->string) ||
qid > bond->dev->real_num_tx_queues)
goto err_no_cmd;
@@ -1232,8 +1227,7 @@ out:
return ret;
err_no_cmd:
- pr_info("invalid input for queue_id set for %s.\n",
- bond->dev->name);
+ pr_info("invalid input for queue_id set for %s\n", bond->dev->name);
ret = -EPERM;
goto out;
@@ -1254,7 +1248,7 @@ int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
dev = __dev_get_by_name(dev_net(bond->dev), ifname);
if (!dev) {
- pr_info("%s: Interface %s does not exist!\n",
+ pr_info("%s: interface %s does not exist!\n",
bond->dev->name, ifname);
ret = -ENODEV;
goto out;
@@ -1262,12 +1256,12 @@ int bond_option_slaves_set(struct bonding *bond, struct bond_opt_value *newval)
switch (command[0]) {
case '+':
- pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name);
+ pr_info("%s: Adding slave %s\n", bond->dev->name, dev->name);
ret = bond_enslave(bond->dev, dev);
break;
case '-':
- pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name);
+ pr_info("%s: Removing slave %s\n", bond->dev->name, dev->name);
ret = bond_release(bond->dev, dev);
break;
@@ -1279,7 +1273,7 @@ out:
return ret;
err_no_cmd:
- pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n",
+ pr_err("no command found in slaves file for bond %s - use +ifname or -ifname\n",
bond->dev->name);
ret = -EPERM;
goto out;
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 3ac20e7..588cf39 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -69,9 +69,7 @@ static void bond_info_show_master(struct seq_file *seq)
struct slave *curr;
int i;
- read_lock(&bond->curr_slave_lock);
- curr = bond->curr_active_slave;
- read_unlock(&bond->curr_slave_lock);
+ curr = rcu_dereference(bond->curr_active_slave);
seq_printf(seq, "Bonding Mode: %s",
bond_mode_name(bond->params.mode));
@@ -254,8 +252,8 @@ void bond_create_proc_entry(struct bonding *bond)
S_IRUGO, bn->proc_dir,
&bond_info_fops, bond);
if (bond->proc_entry == NULL)
- pr_warning("Warning: Cannot create /proc/net/%s/%s\n",
- DRV_NAME, bond_dev->name);
+ pr_warn("Warning: Cannot create /proc/net/%s/%s\n",
+ DRV_NAME, bond_dev->name);
else
memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
}
@@ -281,8 +279,8 @@ void __net_init bond_create_proc_dir(struct bond_net *bn)
if (!bn->proc_dir) {
bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
if (!bn->proc_dir)
- pr_warning("Warning: cannot create /proc/net/%s\n",
- DRV_NAME);
+ pr_warn("Warning: Cannot create /proc/net/%s\n",
+ DRV_NAME);
}
}
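
Editorial sketch (not part of the patch): dropping curr_slave_lock here relies on the /proc show path running under rcu_read_lock(), which the seq_file start/stop callbacks in this file are assumed to provide; rcu_dereference() then yields a safe snapshot of curr_active_slave. A minimal reader following that pattern, with a hypothetical function name, might look like this:

#include <linux/rcupdate.h>
#include <linux/seq_file.h>
#include "bonding.h"

/* Hypothetical example reader; only the RCU read-side pattern is the point. */
static void example_show_active(struct seq_file *seq, struct bonding *bond)
{
	struct slave *curr;

	rcu_read_lock();
	curr = rcu_dereference(bond->curr_active_slave);
	if (curr)
		seq_printf(seq, "Currently Active Slave: %s\n",
			   curr->dev->name);
	rcu_read_unlock();
}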
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 643fcc1..225ee69 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -117,9 +117,9 @@ static ssize_t bonding_store_bonds(struct class *cls,
rv = bond_create(bn->net, ifname);
if (rv) {
if (rv == -EEXIST)
- pr_info("%s already exists.\n", ifname);
+ pr_info("%s already exists\n", ifname);
else
- pr_info("%s creation failed.\n", ifname);
+ pr_info("%s creation failed\n", ifname);
res = rv;
}
} else if (command[0] == '-') {
@@ -144,7 +144,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
return res;
err_no_cmd:
- pr_err("no command found in bonding_masters. Use +ifname or -ifname.\n");
+ pr_err("no command found in bonding_masters - use +ifname or -ifname\n");
return -EPERM;
}
@@ -1135,7 +1135,7 @@ int bond_create_sysfs(struct bond_net *bn)
/* Is someone being kinky and naming a device bonding_master? */
if (__dev_get_by_name(bn->net,
class_attr_bonding_masters.attr.name))
- pr_err("network device named %s already exists in sysfs",
+ pr_err("network device named %s already exists in sysfs\n",
class_attr_bonding_masters.attr.name);
ret = 0;
}
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 2b0fdec..b7127a1 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -188,8 +188,9 @@ struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
int delay;
- unsigned long jiffies;
- unsigned long last_arp_rx;
+ /* all three in jiffies */
+ unsigned long last_link_up;
+ unsigned long last_rx;
unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
s8 link; /* one of BOND_LINK_XXXX */
s8 new_link;
@@ -374,6 +375,11 @@ static inline bool bond_is_active_slave(struct slave *slave)
#define BOND_ARP_VALIDATE_BACKUP (1 << BOND_STATE_BACKUP)
#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
BOND_ARP_VALIDATE_BACKUP)
+#define BOND_ARP_FILTER (BOND_ARP_VALIDATE_ALL + 1)
+#define BOND_ARP_FILTER_ACTIVE (BOND_ARP_VALIDATE_ACTIVE | \
+ BOND_ARP_FILTER)
+#define BOND_ARP_FILTER_BACKUP (BOND_ARP_VALIDATE_BACKUP | \
+ BOND_ARP_FILTER)
#define BOND_SLAVE_NOTIFY_NOW true
#define BOND_SLAVE_NOTIFY_LATER false
@@ -384,6 +390,12 @@ static inline int slave_do_arp_validate(struct bonding *bond,
return bond->params.arp_validate & (1 << bond_slave_state(slave));
}
+static inline int slave_do_arp_validate_only(struct bonding *bond,
+ struct slave *slave)
+{
+ return bond->params.arp_validate & BOND_ARP_FILTER;
+}
+
/* Get the oldest arp which we've received on this slave for bond's
* arp_targets.
*/
@@ -403,14 +415,10 @@ static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond,
static inline unsigned long slave_last_rx(struct bonding *bond,
struct slave *slave)
{
- if (slave_do_arp_validate(bond, slave)) {
- if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
- return slave_oldest_target_arp_rx(bond, slave);
- else
- return slave->last_arp_rx;
- }
+ if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL)
+ return slave_oldest_target_arp_rx(bond, slave);
- return slave->dev->last_rx;
+ return slave->last_rx;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
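
Editorial sketch (not part of the patch): assuming BOND_STATE_ACTIVE is 0 and BOND_STATE_BACKUP is 1, as in the uapi if_bonding.h, the new arp_validate modes compose as plain bit flags: "filter" adds bit 2 on top of the existing validate bits, which is exactly what slave_do_arp_validate_only() tests for. The values work out as follows:

/* Illustration only; names prefixed SKETCH_ to avoid clashing with bonding.h */
enum {
	SKETCH_VALIDATE_ACTIVE = 1 << 0,			/* 1: "active"        */
	SKETCH_VALIDATE_BACKUP = 1 << 1,			/* 2: "backup"        */
	SKETCH_VALIDATE_ALL    = SKETCH_VALIDATE_ACTIVE |
				 SKETCH_VALIDATE_BACKUP,	/* 3: "all"           */
	SKETCH_FILTER          = SKETCH_VALIDATE_ALL + 1,	/* 4: "filter"        */
	SKETCH_FILTER_ACTIVE   = SKETCH_FILTER |
				 SKETCH_VALIDATE_ACTIVE,	/* 5: "filter_active" */
	SKETCH_FILTER_BACKUP   = SKETCH_FILTER |
				 SKETCH_VALIDATE_BACKUP,	/* 6: "filter_backup" */
};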
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 88a6a58..fc73865 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -204,7 +204,6 @@ static void ldisc_receive(struct tty_struct *tty, const u8 *data,
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
- skb->dev = ser->dev;
debugfs_rx(ser, data, count);
/* Push received packet up the stack. */
ret = netif_rx_ni(skb);
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 155db68..ff54c0e 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -554,7 +554,6 @@ int cfspi_rxfrm(struct cfspi *cfspi, u8 *buf, size_t len)
skb->protocol = htons(ETH_P_CAIF);
skb_reset_mac_header(skb);
- skb->dev = cfspi->ndev;
/*
* Push received packet up the stack.
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 6efe274..1d00b95 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -420,7 +420,11 @@ static void at91_chip_start(struct net_device *dev)
at91_transceiver_switch(priv, 1);
/* enable chip */
- at91_write(priv, AT91_MR, AT91_MR_CANEN);
+ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
+ reg_mr = AT91_MR_CANEN | AT91_MR_ABM;
+ else
+ reg_mr = AT91_MR_CANEN;
+ at91_write(priv, AT91_MR, reg_mr);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -1341,7 +1345,8 @@ static int at91_can_probe(struct platform_device *pdev)
priv->can.bittiming_const = &at91_bittiming_const;
priv->can.do_set_mode = at91_set_mode;
priv->can.do_get_berr_counter = at91_get_berr_counter;
- priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;
+ priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
+ CAN_CTRLMODE_LISTENONLY;
priv->dev = dev;
priv->reg_base = addr;
priv->devtype_data = *devtype_data;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index fc59bc6..c0563f1 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -512,6 +512,30 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
}
EXPORT_SYMBOL_GPL(alloc_can_skb);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd)
+{
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, sizeof(struct can_skb_priv) +
+ sizeof(struct canfd_frame));
+ if (unlikely(!skb))
+ return NULL;
+
+ skb->protocol = htons(ETH_P_CANFD);
+ skb->pkt_type = PACKET_BROADCAST;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ can_skb_reserve(skb);
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+
+ *cfd = (struct canfd_frame *)skb_put(skb, sizeof(struct canfd_frame));
+ memset(*cfd, 0, sizeof(struct canfd_frame));
+
+ return skb;
+}
+EXPORT_SYMBOL_GPL(alloc_canfd_skb);
+
struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
{
struct sk_buff *skb;
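
Editorial sketch (not part of the patch): a hedged example of how a CAN FD capable driver's receive path might use the new helper. The hardware-specific values are placeholders; alloc_canfd_skb(), the struct canfd_frame fields, the netdev stats and netif_receive_skb() are the real APIs, and the helper's declaration is assumed to be exported through <linux/can/dev.h> by the rest of this series.

#include <linux/can/dev.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Hypothetical rx handler; fill-in values stand in for hardware reads. */
static void example_rx_one_canfd_frame(struct net_device *dev)
{
	struct canfd_frame *cfd;
	struct sk_buff *skb;

	skb = alloc_canfd_skb(dev, &cfd);
	if (!skb) {
		dev->stats.rx_dropped++;
		return;
	}

	cfd->can_id = 0x123;			/* placeholder CAN ID           */
	cfd->len = 12;				/* CAN FD allows up to 64 bytes */
	memset(cfd->data, 0xab, cfd->len);	/* placeholder payload          */

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += cfd->len;
	netif_receive_skb(skb);
}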
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
index ff2ba86..4b18b87 100644
--- a/drivers/net/can/sja1000/Kconfig
+++ b/drivers/net/can/sja1000/Kconfig
@@ -17,16 +17,9 @@ config CAN_SJA1000_PLATFORM
the "platform bus" (Linux abstraction for directly to the
processor attached devices). Which can be found on various
boards from Phytec (http://www.phytec.de) like the PCM027,
- PCM038.
-
-config CAN_SJA1000_OF_PLATFORM
- tristate "Generic OF Platform Bus based SJA1000 driver"
- depends on OF
- ---help---
- This driver adds support for the SJA1000 chips connected to
- the OpenFirmware "platform bus" found on embedded systems with
- OpenFirmware bindings, e.g. if you have a PowerPC based system
- you may want to enable this option.
+ PCM038. It also covers SJA1000 chips on the OpenFirmware
+ "platform bus" found on embedded systems with OpenFirmware
+ bindings; e.g. on a PowerPC based system you may want to
+ enable this option.
config CAN_EMS_PCMCIA
tristate "EMS CPC-CARD Card"
diff --git a/drivers/net/can/sja1000/Makefile b/drivers/net/can/sja1000/Makefile
index b3d05cb..531d5fc 100644
--- a/drivers/net/can/sja1000/Makefile
+++ b/drivers/net/can/sja1000/Makefile
@@ -5,7 +5,6 @@
obj-$(CONFIG_CAN_SJA1000) += sja1000.o
obj-$(CONFIG_CAN_SJA1000_ISA) += sja1000_isa.o
obj-$(CONFIG_CAN_SJA1000_PLATFORM) += sja1000_platform.o
-obj-$(CONFIG_CAN_SJA1000_OF_PLATFORM) += sja1000_of_platform.o
obj-$(CONFIG_CAN_EMS_PCMCIA) += ems_pcmcia.o
obj-$(CONFIG_CAN_EMS_PCI) += ems_pci.o
obj-$(CONFIG_CAN_KVASER_PCI) += kvaser_pci.o
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index f17c301..55cce47 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -106,8 +106,7 @@ static int sja1000_probe_chip(struct net_device *dev)
struct sja1000_priv *priv = netdev_priv(dev);
if (priv->reg_base && sja1000_is_absent(priv)) {
- printk(KERN_INFO "%s: probing @0x%lX failed\n",
- DRV_NAME, dev->base_addr);
+ netdev_err(dev, "probing failed\n");
return 0;
}
return -1;
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
deleted file mode 100644
index 2f6e245..0000000
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ /dev/null
@@ -1,220 +0,0 @@
-/*
- * Driver for SJA1000 CAN controllers on the OpenFirmware platform bus
- *
- * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the version 2 of the GNU General Public License
- * as published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-/* This is a generic driver for SJA1000 chips on the OpenFirmware platform
- * bus found on embedded PowerPC systems. You need a SJA1000 CAN node
- * definition in your flattened device tree source (DTS) file similar to:
- *
- * can@3,100 {
- * compatible = "nxp,sja1000";
- * reg = <3 0x100 0x80>;
- * interrupts = <2 0>;
- * interrupt-parent = <&mpic>;
- * nxp,external-clock-frequency = <16000000>;
- * };
- *
- * See "Documentation/devicetree/bindings/net/can/sja1000.txt" for further
- * information.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/netdevice.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/can/dev.h>
-
-#include <linux/of_platform.h>
-#include <linux/of_address.h>
-#include <linux/of_irq.h>
-
-#include "sja1000.h"
-
-#define DRV_NAME "sja1000_of_platform"
-
-MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
-MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the OF platform bus");
-MODULE_LICENSE("GPL v2");
-
-#define SJA1000_OFP_CAN_CLOCK (16000000 / 2)
-
-#define SJA1000_OFP_OCR OCR_TX0_PULLDOWN
-#define SJA1000_OFP_CDR (CDR_CBP | CDR_CLK_OFF)
-
-static u8 sja1000_ofp_read_reg(const struct sja1000_priv *priv, int reg)
-{
- return ioread8(priv->reg_base + reg);
-}
-
-static void sja1000_ofp_write_reg(const struct sja1000_priv *priv,
- int reg, u8 val)
-{
- iowrite8(val, priv->reg_base + reg);
-}
-
-static int sja1000_ofp_remove(struct platform_device *ofdev)
-{
- struct net_device *dev = platform_get_drvdata(ofdev);
- struct sja1000_priv *priv = netdev_priv(dev);
- struct device_node *np = ofdev->dev.of_node;
- struct resource res;
-
- unregister_sja1000dev(dev);
- free_sja1000dev(dev);
- iounmap(priv->reg_base);
- irq_dispose_mapping(dev->irq);
-
- of_address_to_resource(np, 0, &res);
- release_mem_region(res.start, resource_size(&res));
-
- return 0;
-}
-
-static int sja1000_ofp_probe(struct platform_device *ofdev)
-{
- struct device_node *np = ofdev->dev.of_node;
- struct net_device *dev;
- struct sja1000_priv *priv;
- struct resource res;
- u32 prop;
- int err, irq, res_size;
- void __iomem *base;
-
- err = of_address_to_resource(np, 0, &res);
- if (err) {
- dev_err(&ofdev->dev, "invalid address\n");
- return err;
- }
-
- res_size = resource_size(&res);
-
- if (!request_mem_region(res.start, res_size, DRV_NAME)) {
- dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
- return -EBUSY;
- }
-
- base = ioremap_nocache(res.start, res_size);
- if (!base) {
- dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
- err = -ENOMEM;
- goto exit_release_mem;
- }
-
- irq = irq_of_parse_and_map(np, 0);
- if (irq == 0) {
- dev_err(&ofdev->dev, "no irq found\n");
- err = -ENODEV;
- goto exit_unmap_mem;
- }
-
- dev = alloc_sja1000dev(0);
- if (!dev) {
- err = -ENOMEM;
- goto exit_dispose_irq;
- }
-
- priv = netdev_priv(dev);
-
- priv->read_reg = sja1000_ofp_read_reg;
- priv->write_reg = sja1000_ofp_write_reg;
-
- err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
- if (!err)
- priv->can.clock.freq = prop / 2;
- else
- priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
-
- err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
- if (!err)
- priv->ocr |= prop & OCR_MODE_MASK;
- else
- priv->ocr |= OCR_MODE_NORMAL; /* default */
-
- err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
- if (!err)
- priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
- else
- priv->ocr |= OCR_TX0_PULLDOWN; /* default */
-
- err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
- if (!err && prop) {
- u32 divider = priv->can.clock.freq * 2 / prop;
-
- if (divider > 1)
- priv->cdr |= divider / 2 - 1;
- else
- priv->cdr |= CDR_CLKOUT_MASK;
- } else {
- priv->cdr |= CDR_CLK_OFF; /* default */
- }
-
- if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
- priv->cdr |= CDR_CBP; /* default */
-
- priv->irq_flags = IRQF_SHARED;
- priv->reg_base = base;
-
- dev->irq = irq;
-
- dev_info(&ofdev->dev,
- "reg_base=0x%p irq=%d clock=%d ocr=0x%02x cdr=0x%02x\n",
- priv->reg_base, dev->irq, priv->can.clock.freq,
- priv->ocr, priv->cdr);
-
- platform_set_drvdata(ofdev, dev);
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
- err = register_sja1000dev(dev);
- if (err) {
- dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
- DRV_NAME, err);
- goto exit_free_sja1000;
- }
-
- return 0;
-
-exit_free_sja1000:
- free_sja1000dev(dev);
-exit_dispose_irq:
- irq_dispose_mapping(irq);
-exit_unmap_mem:
- iounmap(base);
-exit_release_mem:
- release_mem_region(res.start, res_size);
-
- return err;
-}
-
-static struct of_device_id sja1000_ofp_table[] = {
- {.compatible = "nxp,sja1000"},
- {},
-};
-MODULE_DEVICE_TABLE(of, sja1000_ofp_table);
-
-static struct platform_driver sja1000_ofp_driver = {
- .driver = {
- .owner = THIS_MODULE,
- .name = DRV_NAME,
- .of_match_table = sja1000_ofp_table,
- },
- .probe = sja1000_ofp_probe,
- .remove = sja1000_ofp_remove,
-};
-
-module_platform_driver(sja1000_ofp_driver);
diff --git a/drivers/net/can/sja1000/sja1000_platform.c b/drivers/net/can/sja1000/sja1000_platform.c
index 943df64..95a844a 100644
--- a/drivers/net/can/sja1000/sja1000_platform.c
+++ b/drivers/net/can/sja1000/sja1000_platform.c
@@ -26,12 +26,16 @@
#include <linux/can/dev.h>
#include <linux/can/platform/sja1000.h>
#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
#include "sja1000.h"
#define DRV_NAME "sja1000_platform"
+#define SP_CAN_CLOCK (16000000 / 2)
MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de>");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus");
MODULE_ALIAS("platform:" DRV_NAME);
MODULE_LICENSE("GPL v2");
@@ -66,59 +70,16 @@ static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val)
iowrite8(val, priv->reg_base + reg * 4);
}
-static int sp_probe(struct platform_device *pdev)
+static void sp_populate(struct sja1000_priv *priv,
+ struct sja1000_platform_data *pdata,
+ unsigned long resource_mem_flags)
{
- int err;
- void __iomem *addr;
- struct net_device *dev;
- struct sja1000_priv *priv;
- struct resource *res_mem, *res_irq;
- struct sja1000_platform_data *pdata;
-
- pdata = dev_get_platdata(&pdev->dev);
- if (!pdata) {
- dev_err(&pdev->dev, "No platform data provided!\n");
- err = -ENODEV;
- goto exit;
- }
-
- res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res_mem || !res_irq) {
- err = -ENODEV;
- goto exit;
- }
-
- if (!request_mem_region(res_mem->start, resource_size(res_mem),
- DRV_NAME)) {
- err = -EBUSY;
- goto exit;
- }
-
- addr = ioremap_nocache(res_mem->start, resource_size(res_mem));
- if (!addr) {
- err = -ENOMEM;
- goto exit_release;
- }
-
- dev = alloc_sja1000dev(0);
- if (!dev) {
- err = -ENOMEM;
- goto exit_iounmap;
- }
- priv = netdev_priv(dev);
-
- dev->irq = res_irq->start;
- priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
- if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
- priv->irq_flags |= IRQF_SHARED;
- priv->reg_base = addr;
/* The CAN clock frequency is half the oscillator clock frequency */
priv->can.clock.freq = pdata->osc_freq / 2;
priv->ocr = pdata->ocr;
priv->cdr = pdata->cdr;
- switch (res_mem->flags & IORESOURCE_MEM_TYPE_MASK) {
+ switch (resource_mem_flags & IORESOURCE_MEM_TYPE_MASK) {
case IORESOURCE_MEM_32BIT:
priv->read_reg = sp_read_reg32;
priv->write_reg = sp_write_reg32;
@@ -133,6 +94,124 @@ static int sp_probe(struct platform_device *pdev)
priv->write_reg = sp_write_reg8;
break;
}
+}
+
+static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of)
+{
+ int err;
+ u32 prop;
+
+ err = of_property_read_u32(of, "reg-io-width", &prop);
+ if (err)
+ prop = 1; /* 8 bit is default */
+
+ switch (prop) {
+ case 4:
+ priv->read_reg = sp_read_reg32;
+ priv->write_reg = sp_write_reg32;
+ break;
+ case 2:
+ priv->read_reg = sp_read_reg16;
+ priv->write_reg = sp_write_reg16;
+ break;
+ case 1: /* fallthrough */
+ default:
+ priv->read_reg = sp_read_reg8;
+ priv->write_reg = sp_write_reg8;
+ }
+
+ err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop);
+ if (!err)
+ priv->can.clock.freq = prop / 2;
+ else
+ priv->can.clock.freq = SP_CAN_CLOCK; /* default */
+
+ err = of_property_read_u32(of, "nxp,tx-output-mode", &prop);
+ if (!err)
+ priv->ocr |= prop & OCR_MODE_MASK;
+ else
+ priv->ocr |= OCR_MODE_NORMAL; /* default */
+
+ err = of_property_read_u32(of, "nxp,tx-output-config", &prop);
+ if (!err)
+ priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+ else
+ priv->ocr |= OCR_TX0_PULLDOWN; /* default */
+
+ err = of_property_read_u32(of, "nxp,clock-out-frequency", &prop);
+ if (!err && prop) {
+ u32 divider = priv->can.clock.freq * 2 / prop;
+
+ if (divider > 1)
+ priv->cdr |= divider / 2 - 1;
+ else
+ priv->cdr |= CDR_CLKOUT_MASK;
+ } else {
+ priv->cdr |= CDR_CLK_OFF; /* default */
+ }
+
+ if (!of_property_read_bool(of, "nxp,no-comparator-bypass"))
+ priv->cdr |= CDR_CBP; /* default */
+}
+
+static int sp_probe(struct platform_device *pdev)
+{
+ int err, irq = 0;
+ void __iomem *addr;
+ struct net_device *dev;
+ struct sja1000_priv *priv;
+ struct resource *res_mem, *res_irq = NULL;
+ struct sja1000_platform_data *pdata;
+ struct device_node *of = pdev->dev.of_node;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata && !of) {
+ dev_err(&pdev->dev, "No platform data provided!\n");
+ return -ENODEV;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res_mem)
+ return -ENODEV;
+
+ if (!devm_request_mem_region(&pdev->dev, res_mem->start,
+ resource_size(res_mem), DRV_NAME))
+ return -EBUSY;
+
+ addr = devm_ioremap_nocache(&pdev->dev, res_mem->start,
+ resource_size(res_mem));
+ if (!addr)
+ return -ENOMEM;
+
+ if (of)
+ irq = irq_of_parse_and_map(of, 0);
+ else
+ res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+
+ if (!irq && !res_irq)
+ return -ENODEV;
+
+ dev = alloc_sja1000dev(0);
+ if (!dev)
+ return -ENOMEM;
+ priv = netdev_priv(dev);
+
+ if (res_irq) {
+ irq = res_irq->start;
+ priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK;
+ if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE)
+ priv->irq_flags |= IRQF_SHARED;
+ } else {
+ priv->irq_flags = IRQF_SHARED;
+ }
+
+ dev->irq = irq;
+ priv->reg_base = addr;
+
+ if (of)
+ sp_populate_of(priv, of);
+ else
+ sp_populate(priv, pdata, res_mem->flags);
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -150,39 +229,32 @@ static int sp_probe(struct platform_device *pdev)
exit_free:
free_sja1000dev(dev);
- exit_iounmap:
- iounmap(addr);
- exit_release:
- release_mem_region(res_mem->start, resource_size(res_mem));
- exit:
return err;
}
static int sp_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
- struct sja1000_priv *priv = netdev_priv(dev);
- struct resource *res;
unregister_sja1000dev(dev);
-
- if (priv->reg_base)
- iounmap(priv->reg_base);
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- release_mem_region(res->start, resource_size(res));
-
free_sja1000dev(dev);
return 0;
}
+static struct of_device_id sp_of_table[] = {
+ {.compatible = "nxp,sja1000"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, sp_of_table);
+
static struct platform_driver sp_driver = {
.probe = sp_probe,
.remove = sp_remove,
.driver = {
.name = DRV_NAME,
.owner = THIS_MODULE,
+ .of_match_table = sp_of_table,
},
};
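
For readers of the merged driver above, a minimal board-code sketch of the legacy, non-DT path that sp_probe() still supports, i.e. the platform data consumed by sp_populate(). This is illustrative only: the base address, IRQ number and register values are invented; only the field names (osc_freq, ocr, cdr) and the "sja1000_platform" device name come from the driver itself.

	/* Sketch only -- hypothetical board code, not part of this patch. */
	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <linux/err.h>
	#include <linux/ioport.h>
	#include <linux/platform_device.h>
	#include <linux/can/platform/sja1000.h>

	static struct resource board_sja1000_res[] = {
		{
			.start = 0xd0000000,			/* example MMIO base */
			.end   = 0xd0000000 + 0x80 - 1,
			.flags = IORESOURCE_MEM | IORESOURCE_MEM_8BIT,
		}, {
			.start = 42,				/* example IRQ line */
			.end   = 42,
			.flags = IORESOURCE_IRQ,
		},
	};

	static struct sja1000_platform_data board_sja1000_pdata = {
		.osc_freq = 16000000,	/* sp_populate() halves this for can.clock.freq */
		.ocr	  = 0x1a,	/* example output control register value */
		.cdr	  = 0xc8,	/* example clock divider register value */
	};

	static int __init board_add_sja1000(void)
	{
		struct platform_device *pdev;

		pdev = platform_device_register_resndata(NULL, "sja1000_platform", -1,
				board_sja1000_res, ARRAY_SIZE(board_sja1000_res),
				&board_sja1000_pdata, sizeof(board_sja1000_pdata));
		return PTR_ERR_OR_ZERO(pdev);
	}
	device_initcall(board_add_sja1000);

On a DT system the same probe instead takes the sp_populate_of() path, keyed off the "nxp,sja1000" compatible added to sp_of_table above.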
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index bd8f84b..1656317c 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -88,16 +88,10 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
- int i;
- dev->dstats = alloc_percpu(struct pcpu_dstats);
+ dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
if (!dev->dstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_dstats *dstats;
- dstats = per_cpu_ptr(dev->dstats, i);
- u64_stats_init(&dstats->syncp);
- }
return 0;
}
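
The one-line change above leans on netdev_alloc_pcpu_stats(), which bundles the per-cpu allocation and the u64_stats seqcount initialisation that dummy_dev_init() used to open-code. A rough open-coded equivalent, mirroring the removed lines (sketch only; the real helper lives in <linux/netdevice.h> and the function name here is made up):

	static struct pcpu_dstats __percpu *dummy_alloc_dstats(void)
	{
		struct pcpu_dstats __percpu *dstats = alloc_percpu(struct pcpu_dstats);
		int i;

		if (!dstats)
			return NULL;
		for_each_possible_cpu(i) {
			struct pcpu_dstats *stat = per_cpu_ptr(dstats, i);

			/* u64_stats_init() matters on 32-bit SMP, where the
			 * syncp seqcount guards 64-bit counter reads */
			u64_stats_init(&stat->syncp);
		}
		return dstats;
	}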
diff --git a/drivers/net/ethernet/3com/3c589_cs.c b/drivers/net/ethernet/3com/3c589_cs.c
index 5992860..063557e 100644
--- a/drivers/net/ethernet/3com/3c589_cs.c
+++ b/drivers/net/ethernet/3com/3c589_cs.c
@@ -1,23 +1,24 @@
-/*======================================================================
-
- A PCMCIA ethernet driver for the 3com 3c589 card.
-
- Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
-
- 3c589_cs.c 1.162 2001/10/13 00:08:50
-
- The network driver code is based on Donald Becker's 3c589 code:
-
- Written 1994 by Donald Becker.
- Copyright 1993 United States Government as represented by the
- Director, National Security Agency. This software may be used and
- distributed according to the terms of the GNU General Public License,
- incorporated herein by reference.
- Donald Becker may be reached at becker@scyld.com
-
- Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
-
-======================================================================*/
+/* ======================================================================
+ *
+ * A PCMCIA ethernet driver for the 3com 3c589 card.
+ *
+ * Copyright (C) 1999 David A. Hinds -- dahinds@users.sourceforge.net
+ *
+ * 3c589_cs.c 1.162 2001/10/13 00:08:50
+ *
+ * The network driver code is based on Donald Becker's 3c589 code:
+ *
+ * Written 1994 by Donald Becker.
+ * Copyright 1993 United States Government as represented by the
+ * Director, National Security Agency. This software may be used and
+ * distributed according to the terms of the GNU General Public License,
+ * incorporated herein by reference.
+ * Donald Becker may be reached at becker@scyld.com
+ *
+ * Updated for 2.5.x by Alan Cox <alan@lxorguk.ukuu.org.uk>
+ *
+ * ======================================================================
+ */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -41,18 +42,20 @@
#include <linux/ioport.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
#include <pcmcia/ds.h>
-#include <asm/uaccess.h>
-#include <asm/io.h>
/* To minimize the size of the driver source I only define operating
- constants if they are used several times. You'll need the manual
- if you want to understand driver details. */
+ * constants if they are used several times. You'll need the manual
+ * if you want to understand driver details.
+ */
+
/* Offsets from base I/O address. */
#define EL3_DATA 0x00
#define EL3_TIMER 0x0a
@@ -65,7 +68,9 @@
#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
/* The top five bits written to EL3_CMD are a command, the lower
- 11 bits are the parameter, if applicable. */
+ * 11 bits are the parameter, if applicable.
+ */
+
enum c509cmd {
TotalReset = 0<<11,
SelectWindow = 1<<11,
@@ -190,138 +195,142 @@ static const struct net_device_ops el3_netdev_ops = {
static int tc589_probe(struct pcmcia_device *link)
{
- struct el3_private *lp;
- struct net_device *dev;
+ struct el3_private *lp;
+ struct net_device *dev;
- dev_dbg(&link->dev, "3c589_attach()\n");
+ dev_dbg(&link->dev, "3c589_attach()\n");
- /* Create new ethernet device */
- dev = alloc_etherdev(sizeof(struct el3_private));
- if (!dev)
- return -ENOMEM;
- lp = netdev_priv(dev);
- link->priv = dev;
- lp->p_dev = link;
+ /* Create new ethernet device */
+ dev = alloc_etherdev(sizeof(struct el3_private));
+ if (!dev)
+ return -ENOMEM;
+ lp = netdev_priv(dev);
+ link->priv = dev;
+ lp->p_dev = link;
- spin_lock_init(&lp->lock);
- link->resource[0]->end = 16;
- link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
+ spin_lock_init(&lp->lock);
+ link->resource[0]->end = 16;
+ link->resource[0]->flags |= IO_DATA_PATH_WIDTH_16;
- link->config_flags |= CONF_ENABLE_IRQ;
- link->config_index = 1;
+ link->config_flags |= CONF_ENABLE_IRQ;
+ link->config_index = 1;
- dev->netdev_ops = &el3_netdev_ops;
- dev->watchdog_timeo = TX_TIMEOUT;
+ dev->netdev_ops = &el3_netdev_ops;
+ dev->watchdog_timeo = TX_TIMEOUT;
- SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
+ SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
- return tc589_config(link);
+ return tc589_config(link);
}
static void tc589_detach(struct pcmcia_device *link)
{
- struct net_device *dev = link->priv;
+ struct net_device *dev = link->priv;
- dev_dbg(&link->dev, "3c589_detach\n");
+ dev_dbg(&link->dev, "3c589_detach\n");
- unregister_netdev(dev);
+ unregister_netdev(dev);
- tc589_release(link);
+ tc589_release(link);
- free_netdev(dev);
+ free_netdev(dev);
} /* tc589_detach */
static int tc589_config(struct pcmcia_device *link)
{
- struct net_device *dev = link->priv;
- __be16 *phys_addr;
- int ret, i, j, multi = 0, fifo;
- unsigned int ioaddr;
- static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
- u8 *buf;
- size_t len;
-
- dev_dbg(&link->dev, "3c589_config\n");
-
- phys_addr = (__be16 *)dev->dev_addr;
- /* Is this a 3c562? */
- if (link->manf_id != MANFID_3COM)
- dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
- multi = (link->card_id == PRODID_3COM_3C562);
-
- link->io_lines = 16;
-
- /* For the 3c562, the base address must be xx00-xx7f */
- for (i = j = 0; j < 0x400; j += 0x10) {
- if (multi && (j & 0x80)) continue;
- link->resource[0]->start = j ^ 0x300;
- i = pcmcia_request_io(link);
- if (i == 0)
- break;
- }
- if (i != 0)
- goto failed;
-
- ret = pcmcia_request_irq(link, el3_interrupt);
- if (ret)
- goto failed;
-
- ret = pcmcia_enable_device(link);
- if (ret)
- goto failed;
-
- dev->irq = link->irq;
- dev->base_addr = link->resource[0]->start;
- ioaddr = dev->base_addr;
- EL3WINDOW(0);
-
- /* The 3c589 has an extra EEPROM for configuration info, including
- the hardware address. The 3c562 puts the address in the CIS. */
- len = pcmcia_get_tuple(link, 0x88, &buf);
- if (buf && len >= 6) {
- for (i = 0; i < 3; i++)
- phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
- kfree(buf);
- } else {
- kfree(buf); /* 0 < len < 6 */
- for (i = 0; i < 3; i++)
- phys_addr[i] = htons(read_eeprom(ioaddr, i));
- if (phys_addr[0] == htons(0x6060)) {
- dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
- dev->base_addr, dev->base_addr+15);
- goto failed;
+ struct net_device *dev = link->priv;
+ __be16 *phys_addr;
+ int ret, i, j, multi = 0, fifo;
+ unsigned int ioaddr;
+ static const char * const ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+ u8 *buf;
+ size_t len;
+
+ dev_dbg(&link->dev, "3c589_config\n");
+
+ phys_addr = (__be16 *)dev->dev_addr;
+ /* Is this a 3c562? */
+ if (link->manf_id != MANFID_3COM)
+ dev_info(&link->dev, "hmmm, is this really a 3Com card??\n");
+ multi = (link->card_id == PRODID_3COM_3C562);
+
+ link->io_lines = 16;
+
+ /* For the 3c562, the base address must be xx00-xx7f */
+ for (i = j = 0; j < 0x400; j += 0x10) {
+ if (multi && (j & 0x80))
+ continue;
+ link->resource[0]->start = j ^ 0x300;
+ i = pcmcia_request_io(link);
+ if (i == 0)
+ break;
}
- }
-
- /* The address and resource configuration register aren't loaded from
- the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version. */
- outw(0x3f00, ioaddr + 8);
- fifo = inl(ioaddr);
-
- /* The if_port symbol can be set when the module is loaded */
- if ((if_port >= 0) && (if_port <= 3))
- dev->if_port = if_port;
- else
- dev_err(&link->dev, "invalid if_port requested\n");
-
- SET_NETDEV_DEV(dev, &link->dev);
-
- if (register_netdev(dev) != 0) {
- dev_err(&link->dev, "register_netdev() failed\n");
- goto failed;
- }
-
- netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
- (multi ? "562" : "589"), dev->base_addr, dev->irq,
- dev->dev_addr);
- netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
- (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
- if_names[dev->if_port]);
- return 0;
+ if (i != 0)
+ goto failed;
+
+ ret = pcmcia_request_irq(link, el3_interrupt);
+ if (ret)
+ goto failed;
+
+ ret = pcmcia_enable_device(link);
+ if (ret)
+ goto failed;
+
+ dev->irq = link->irq;
+ dev->base_addr = link->resource[0]->start;
+ ioaddr = dev->base_addr;
+ EL3WINDOW(0);
+
+ /* The 3c589 has an extra EEPROM for configuration info, including
+ * the hardware address. The 3c562 puts the address in the CIS.
+ */
+ len = pcmcia_get_tuple(link, 0x88, &buf);
+ if (buf && len >= 6) {
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(le16_to_cpu(buf[i*2]));
+ kfree(buf);
+ } else {
+ kfree(buf); /* 0 < len < 6 */
+ for (i = 0; i < 3; i++)
+ phys_addr[i] = htons(read_eeprom(ioaddr, i));
+ if (phys_addr[0] == htons(0x6060)) {
+ dev_err(&link->dev, "IO port conflict at 0x%03lx-0x%03lx\n",
+ dev->base_addr, dev->base_addr+15);
+ goto failed;
+ }
+ }
+
+ /* The address and resource configuration register aren't loaded from
+ * the EEPROM and *must* be set to 0 and IRQ3 for the PCMCIA version.
+ */
+
+ outw(0x3f00, ioaddr + 8);
+ fifo = inl(ioaddr);
+
+ /* The if_port symbol can be set when the module is loaded */
+ if ((if_port >= 0) && (if_port <= 3))
+ dev->if_port = if_port;
+ else
+ dev_err(&link->dev, "invalid if_port requested\n");
+
+ SET_NETDEV_DEV(dev, &link->dev);
+
+ if (register_netdev(dev) != 0) {
+ dev_err(&link->dev, "register_netdev() failed\n");
+ goto failed;
+ }
+
+ netdev_info(dev, "3Com 3c%s, io %#3lx, irq %d, hw_addr %pM\n",
+ (multi ? "562" : "589"), dev->base_addr, dev->irq,
+ dev->dev_addr);
+ netdev_info(dev, " %dK FIFO split %s Rx:Tx, %s xcvr\n",
+ (fifo & 7) ? 32 : 8, ram_split[(fifo >> 16) & 3],
+ if_names[dev->if_port]);
+ return 0;
failed:
- tc589_release(link);
- return -ENODEV;
+ tc589_release(link);
+ return -ENODEV;
} /* tc589_config */
static void tc589_release(struct pcmcia_device *link)
@@ -353,113 +362,120 @@ static int tc589_resume(struct pcmcia_device *link)
/*====================================================================*/
-/*
- Use this for commands that may take time to finish
-*/
+/* Use this for commands that may take time to finish */
+
static void tc589_wait_for_completion(struct net_device *dev, int cmd)
{
- int i = 100;
- outw(cmd, dev->base_addr + EL3_CMD);
- while (--i > 0)
- if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000)) break;
- if (i == 0)
- netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
+ int i = 100;
+ outw(cmd, dev->base_addr + EL3_CMD);
+ while (--i > 0)
+ if (!(inw(dev->base_addr + EL3_STATUS) & 0x1000))
+ break;
+ if (i == 0)
+ netdev_warn(dev, "command 0x%04x did not complete!\n", cmd);
}
-/*
- Read a word from the EEPROM using the regular EEPROM access register.
- Assume that we are in register window zero.
-*/
+/* Read a word from the EEPROM using the regular EEPROM access register.
+ * Assume that we are in register window zero.
+ */
+
static u16 read_eeprom(unsigned int ioaddr, int index)
{
- int i;
- outw(EEPROM_READ + index, ioaddr + 10);
- /* Reading the eeprom takes 162 us */
- for (i = 1620; i >= 0; i--)
- if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
- break;
- return inw(ioaddr + 12);
+ int i;
+ outw(EEPROM_READ + index, ioaddr + 10);
+ /* Reading the eeprom takes 162 us */
+ for (i = 1620; i >= 0; i--)
+ if ((inw(ioaddr + 10) & EEPROM_BUSY) == 0)
+ break;
+ return inw(ioaddr + 12);
}
-/*
- Set transceiver type, perhaps to something other than what the user
- specified in dev->if_port.
-*/
+/* Set transceiver type, perhaps to something other than what the user
+ * specified in dev->if_port.
+ */
+
static void tc589_set_xcvr(struct net_device *dev, int if_port)
{
- struct el3_private *lp = netdev_priv(dev);
- unsigned int ioaddr = dev->base_addr;
-
- EL3WINDOW(0);
- switch (if_port) {
- case 0: case 1: outw(0, ioaddr + 6); break;
- case 2: outw(3<<14, ioaddr + 6); break;
- case 3: outw(1<<14, ioaddr + 6); break;
- }
- /* On PCMCIA, this just turns on the LED */
- outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
- /* 10baseT interface, enable link beat and jabber check. */
- EL3WINDOW(4);
- outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
- EL3WINDOW(1);
- if (if_port == 2)
- lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
- else
- lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ EL3WINDOW(0);
+ switch (if_port) {
+ case 0:
+ case 1:
+ outw(0, ioaddr + 6);
+ break;
+ case 2:
+ outw(3<<14, ioaddr + 6);
+ break;
+ case 3:
+ outw(1<<14, ioaddr + 6);
+ break;
+ }
+ /* On PCMCIA, this just turns on the LED */
+ outw((if_port == 2) ? StartCoax : StopCoax, ioaddr + EL3_CMD);
+ /* 10baseT interface, enable link beat and jabber check. */
+ EL3WINDOW(4);
+ outw(MEDIA_LED | ((if_port < 2) ? MEDIA_TP : 0), ioaddr + WN4_MEDIA);
+ EL3WINDOW(1);
+ if (if_port == 2)
+ lp->media_status = ((dev->if_port == 0) ? 0x8000 : 0x4000);
+ else
+ lp->media_status = ((dev->if_port == 0) ? 0x4010 : 0x8800);
}
static void dump_status(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- EL3WINDOW(1);
- netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
- inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
- inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
- EL3WINDOW(4);
- netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
- inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
- inw(ioaddr+0x0a));
- EL3WINDOW(1);
+ unsigned int ioaddr = dev->base_addr;
+ EL3WINDOW(1);
+ netdev_info(dev, " irq status %04x, rx status %04x, tx status %02x tx free %04x\n",
+ inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS),
+ inb(ioaddr+TX_STATUS), inw(ioaddr+TX_FREE));
+ EL3WINDOW(4);
+ netdev_info(dev, " diagnostics: fifo %04x net %04x ethernet %04x media %04x\n",
+ inw(ioaddr+0x04), inw(ioaddr+0x06), inw(ioaddr+0x08),
+ inw(ioaddr+0x0a));
+ EL3WINDOW(1);
}
/* Reset and restore all of the 3c589 registers. */
static void tc589_reset(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- int i;
-
- EL3WINDOW(0);
- outw(0x0001, ioaddr + 4); /* Activate board. */
- outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
-
- /* Set the station address in window 2. */
- EL3WINDOW(2);
- for (i = 0; i < 6; i++)
- outb(dev->dev_addr[i], ioaddr + i);
-
- tc589_set_xcvr(dev, dev->if_port);
-
- /* Switch to the stats window, and clear all stats by reading. */
- outw(StatsDisable, ioaddr + EL3_CMD);
- EL3WINDOW(6);
- for (i = 0; i < 9; i++)
- inb(ioaddr+i);
- inw(ioaddr + 10);
- inw(ioaddr + 12);
-
- /* Switch to register set 1 for normal use. */
- EL3WINDOW(1);
-
- set_rx_mode(dev);
- outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
- outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
- outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
- /* Allow status bits to be seen. */
- outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
- /* Ack all pending events, and set active indicator mask. */
- outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ EL3WINDOW(0);
+ outw(0x0001, ioaddr + 4); /* Activate board. */
+ outw(0x3f00, ioaddr + 8); /* Set the IRQ line. */
+
+ /* Set the station address in window 2. */
+ EL3WINDOW(2);
+ for (i = 0; i < 6; i++)
+ outb(dev->dev_addr[i], ioaddr + i);
+
+ tc589_set_xcvr(dev, dev->if_port);
+
+ /* Switch to the stats window, and clear all stats by reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ EL3WINDOW(6);
+ for (i = 0; i < 9; i++)
+ inb(ioaddr+i);
+ inw(ioaddr + 10);
+ inw(ioaddr + 12);
+
+ /* Switch to register set 1 for normal use. */
+ EL3WINDOW(1);
+
+ set_rx_mode(dev);
+ outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+ outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+ outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+ /* Allow status bits to be seen. */
+ outw(SetStatusEnb | 0xff, ioaddr + EL3_CMD);
+ /* Ack all pending events, and set active indicator mask. */
+ outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
ioaddr + EL3_CMD);
- outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
+ outw(SetIntrEnb | IntLatch | TxAvailable | RxComplete | StatsFull
| AdapterFailure, ioaddr + EL3_CMD);
}
@@ -478,381 +494,406 @@ static const struct ethtool_ops netdev_ethtool_ops = {
static int el3_config(struct net_device *dev, struct ifmap *map)
{
- if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
- if (map->port <= 3) {
- dev->if_port = map->port;
- netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
- tc589_set_xcvr(dev, dev->if_port);
- } else
- return -EINVAL;
- }
- return 0;
+ if ((map->port != (u_char)(-1)) && (map->port != dev->if_port)) {
+ if (map->port <= 3) {
+ dev->if_port = map->port;
+ netdev_info(dev, "switched to %s port\n", if_names[dev->if_port]);
+ tc589_set_xcvr(dev, dev->if_port);
+ } else {
+ return -EINVAL;
+ }
+ }
+ return 0;
}
static int el3_open(struct net_device *dev)
{
- struct el3_private *lp = netdev_priv(dev);
- struct pcmcia_device *link = lp->p_dev;
+ struct el3_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
- if (!pcmcia_dev_present(link))
- return -ENODEV;
+ if (!pcmcia_dev_present(link))
+ return -ENODEV;
- link->open++;
- netif_start_queue(dev);
+ link->open++;
+ netif_start_queue(dev);
- tc589_reset(dev);
- init_timer(&lp->media);
- lp->media.function = media_check;
- lp->media.data = (unsigned long) dev;
- lp->media.expires = jiffies + HZ;
- add_timer(&lp->media);
+ tc589_reset(dev);
+ init_timer(&lp->media);
+ lp->media.function = media_check;
+ lp->media.data = (unsigned long) dev;
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
- dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
+ dev_dbg(&link->dev, "%s: opened, status %4.4x.\n",
dev->name, inw(dev->base_addr + EL3_STATUS));
- return 0;
+ return 0;
}
static void el3_tx_timeout(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
-
- netdev_warn(dev, "Transmit timed out!\n");
- dump_status(dev);
- dev->stats.tx_errors++;
- dev->trans_start = jiffies; /* prevent tx timeout */
- /* Issue TX_RESET and TX_START commands. */
- tc589_wait_for_completion(dev, TxReset);
- outw(TxEnable, ioaddr + EL3_CMD);
- netif_wake_queue(dev);
+ unsigned int ioaddr = dev->base_addr;
+
+ netdev_warn(dev, "Transmit timed out!\n");
+ dump_status(dev);
+ dev->stats.tx_errors++;
+ dev->trans_start = jiffies; /* prevent tx timeout */
+ /* Issue TX_RESET and TX_START commands. */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
}
static void pop_tx_status(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- int i;
-
- /* Clear the Tx status stack. */
- for (i = 32; i > 0; i--) {
- u_char tx_status = inb(ioaddr + TX_STATUS);
- if (!(tx_status & 0x84)) break;
- /* reset transmitter on jabber error or underrun */
- if (tx_status & 0x30)
- tc589_wait_for_completion(dev, TxReset);
- if (tx_status & 0x38) {
- netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
- outw(TxEnable, ioaddr + EL3_CMD);
- dev->stats.tx_aborted_errors++;
+ unsigned int ioaddr = dev->base_addr;
+ int i;
+
+ /* Clear the Tx status stack. */
+ for (i = 32; i > 0; i--) {
+ u_char tx_status = inb(ioaddr + TX_STATUS);
+ if (!(tx_status & 0x84))
+ break;
+ /* reset transmitter on jabber error or underrun */
+ if (tx_status & 0x30)
+ tc589_wait_for_completion(dev, TxReset);
+ if (tx_status & 0x38) {
+ netdev_dbg(dev, "transmit error: status 0x%02x\n", tx_status);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_aborted_errors++;
+ }
+ outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
}
- outb(0x00, ioaddr + TX_STATUS); /* Pop the status stack. */
- }
}
static netdev_tx_t el3_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- struct el3_private *priv = netdev_priv(dev);
- unsigned long flags;
+ unsigned int ioaddr = dev->base_addr;
+ struct el3_private *priv = netdev_priv(dev);
+ unsigned long flags;
- netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
+ netdev_dbg(dev, "el3_start_xmit(length = %ld) called, status %4.4x.\n",
(long)skb->len, inw(ioaddr + EL3_STATUS));
- spin_lock_irqsave(&priv->lock, flags);
+ spin_lock_irqsave(&priv->lock, flags);
- dev->stats.tx_bytes += skb->len;
+ dev->stats.tx_bytes += skb->len;
- /* Put out the doubleword header... */
- outw(skb->len, ioaddr + TX_FIFO);
- outw(0x00, ioaddr + TX_FIFO);
- /* ... and the packet rounded to a doubleword. */
- outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+ /* Put out the doubleword header... */
+ outw(skb->len, ioaddr + TX_FIFO);
+ outw(0x00, ioaddr + TX_FIFO);
+ /* ... and the packet rounded to a doubleword. */
+ outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
- if (inw(ioaddr + TX_FREE) <= 1536) {
- netif_stop_queue(dev);
- /* Interrupt us when the FIFO has room for max-sized packet. */
- outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
- }
+ if (inw(ioaddr + TX_FREE) <= 1536) {
+ netif_stop_queue(dev);
+ /* Interrupt us when the FIFO has room for max-sized packet. */
+ outw(SetTxThreshold + 1536, ioaddr + EL3_CMD);
+ }
- pop_tx_status(dev);
- spin_unlock_irqrestore(&priv->lock, flags);
- dev_kfree_skb(skb);
+ pop_tx_status(dev);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ dev_kfree_skb(skb);
- return NETDEV_TX_OK;
+ return NETDEV_TX_OK;
}
/* The EL3 interrupt handler. */
static irqreturn_t el3_interrupt(int irq, void *dev_id)
{
- struct net_device *dev = (struct net_device *) dev_id;
- struct el3_private *lp = netdev_priv(dev);
- unsigned int ioaddr;
- __u16 status;
- int i = 0, handled = 1;
+ struct net_device *dev = (struct net_device *) dev_id;
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr;
+ __u16 status;
+ int i = 0, handled = 1;
- if (!netif_device_present(dev))
- return IRQ_NONE;
+ if (!netif_device_present(dev))
+ return IRQ_NONE;
- ioaddr = dev->base_addr;
+ ioaddr = dev->base_addr;
- netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
+ netdev_dbg(dev, "interrupt, status %4.4x.\n", inw(ioaddr + EL3_STATUS));
- spin_lock(&lp->lock);
- while ((status = inw(ioaddr + EL3_STATUS)) &
+ spin_lock(&lp->lock);
+ while ((status = inw(ioaddr + EL3_STATUS)) &
(IntLatch | RxComplete | StatsFull)) {
- if ((status & 0xe000) != 0x2000) {
- netdev_dbg(dev, "interrupt from dead card\n");
- handled = 0;
- break;
- }
- if (status & RxComplete)
- el3_rx(dev);
- if (status & TxAvailable) {
- netdev_dbg(dev, " TX room bit was handled.\n");
- /* There's room in the FIFO for a full-sized packet. */
- outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
- netif_wake_queue(dev);
- }
- if (status & TxComplete)
- pop_tx_status(dev);
- if (status & (AdapterFailure | RxEarly | StatsFull)) {
- /* Handle all uncommon interrupts. */
- if (status & StatsFull) /* Empty statistics. */
- update_stats(dev);
- if (status & RxEarly) { /* Rx early is unused. */
- el3_rx(dev);
- outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
- }
- if (status & AdapterFailure) {
- u16 fifo_diag;
- EL3WINDOW(4);
- fifo_diag = inw(ioaddr + 4);
- EL3WINDOW(1);
- netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
+ if ((status & 0xe000) != 0x2000) {
+ netdev_dbg(dev, "interrupt from dead card\n");
+ handled = 0;
+ break;
+ }
+ if (status & RxComplete)
+ el3_rx(dev);
+ if (status & TxAvailable) {
+ netdev_dbg(dev, " TX room bit was handled.\n");
+ /* There's room in the FIFO for a full-sized packet. */
+ outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+ netif_wake_queue(dev);
+ }
+ if (status & TxComplete)
+ pop_tx_status(dev);
+ if (status & (AdapterFailure | RxEarly | StatsFull)) {
+ /* Handle all uncommon interrupts. */
+ if (status & StatsFull) /* Empty statistics. */
+ update_stats(dev);
+ if (status & RxEarly) {
+ /* Rx early is unused. */
+ el3_rx(dev);
+ outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+ }
+ if (status & AdapterFailure) {
+ u16 fifo_diag;
+ EL3WINDOW(4);
+ fifo_diag = inw(ioaddr + 4);
+ EL3WINDOW(1);
+ netdev_warn(dev, "adapter failure, FIFO diagnostic register %04x.\n",
fifo_diag);
- if (fifo_diag & 0x0400) {
- /* Tx overrun */
- tc589_wait_for_completion(dev, TxReset);
- outw(TxEnable, ioaddr + EL3_CMD);
+ if (fifo_diag & 0x0400) {
+ /* Tx overrun */
+ tc589_wait_for_completion(dev, TxReset);
+ outw(TxEnable, ioaddr + EL3_CMD);
+ }
+ if (fifo_diag & 0x2000) {
+ /* Rx underrun */
+ tc589_wait_for_completion(dev, RxReset);
+ set_rx_mode(dev);
+ outw(RxEnable, ioaddr + EL3_CMD);
+ }
+ outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
+ }
}
- if (fifo_diag & 0x2000) {
- /* Rx underrun */
- tc589_wait_for_completion(dev, RxReset);
- set_rx_mode(dev);
- outw(RxEnable, ioaddr + EL3_CMD);
+ if (++i > 10) {
+ netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
+ status);
+ /* Clear all interrupts */
+ outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
+ break;
}
- outw(AckIntr | AdapterFailure, ioaddr + EL3_CMD);
- }
+ /* Acknowledge the IRQ. */
+ outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
}
- if (++i > 10) {
- netdev_err(dev, "infinite loop in interrupt, status %4.4x.\n",
- status);
- /* Clear all interrupts */
- outw(AckIntr | 0xFF, ioaddr + EL3_CMD);
- break;
- }
- /* Acknowledge the IRQ. */
- outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
- }
- lp->last_irq = jiffies;
- spin_unlock(&lp->lock);
- netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
- inw(ioaddr + EL3_STATUS));
- return IRQ_RETVAL(handled);
+ lp->last_irq = jiffies;
+ spin_unlock(&lp->lock);
+ netdev_dbg(dev, "exiting interrupt, status %4.4x.\n",
+ inw(ioaddr + EL3_STATUS));
+ return IRQ_RETVAL(handled);
}
static void media_check(unsigned long arg)
{
- struct net_device *dev = (struct net_device *)(arg);
- struct el3_private *lp = netdev_priv(dev);
- unsigned int ioaddr = dev->base_addr;
- u16 media, errs;
- unsigned long flags;
+ struct net_device *dev = (struct net_device *)(arg);
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned int ioaddr = dev->base_addr;
+ u16 media, errs;
+ unsigned long flags;
- if (!netif_device_present(dev)) goto reschedule;
+ if (!netif_device_present(dev))
+ goto reschedule;
- /* Check for pending interrupt with expired latency timer: with
- this, we can limp along even if the interrupt is blocked */
- if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
+ /* Check for pending interrupt with expired latency timer: with
+ * this, we can limp along even if the interrupt is blocked
+ */
+ if ((inw(ioaddr + EL3_STATUS) & IntLatch) &&
(inb(ioaddr + EL3_TIMER) == 0xff)) {
- if (!lp->fast_poll)
- netdev_warn(dev, "interrupt(s) dropped!\n");
-
- local_irq_save(flags);
- el3_interrupt(dev->irq, dev);
- local_irq_restore(flags);
-
- lp->fast_poll = HZ;
- }
- if (lp->fast_poll) {
- lp->fast_poll--;
- lp->media.expires = jiffies + HZ/100;
- add_timer(&lp->media);
- return;
- }
-
- /* lp->lock guards the EL3 window. Window should always be 1 except
- when the lock is held */
- spin_lock_irqsave(&lp->lock, flags);
- EL3WINDOW(4);
- media = inw(ioaddr+WN4_MEDIA) & 0xc810;
-
- /* Ignore collisions unless we've had no irq's recently */
- if (time_before(jiffies, lp->last_irq + HZ)) {
- media &= ~0x0010;
- } else {
- /* Try harder to detect carrier errors */
- EL3WINDOW(6);
- outw(StatsDisable, ioaddr + EL3_CMD);
- errs = inb(ioaddr + 0);
- outw(StatsEnable, ioaddr + EL3_CMD);
- dev->stats.tx_carrier_errors += errs;
- if (errs || (lp->media_status & 0x0010)) media |= 0x0010;
- }
+ if (!lp->fast_poll)
+ netdev_warn(dev, "interrupt(s) dropped!\n");
+
+ local_irq_save(flags);
+ el3_interrupt(dev->irq, dev);
+ local_irq_restore(flags);
+
+ lp->fast_poll = HZ;
+ }
+ if (lp->fast_poll) {
+ lp->fast_poll--;
+ lp->media.expires = jiffies + HZ/100;
+ add_timer(&lp->media);
+ return;
+ }
+
+ /* lp->lock guards the EL3 window. Window should always be 1 except
+ * when the lock is held
+ */
+
+ spin_lock_irqsave(&lp->lock, flags);
+ EL3WINDOW(4);
+ media = inw(ioaddr+WN4_MEDIA) & 0xc810;
+
+ /* Ignore collisions unless we've had no irq's recently */
+ if (time_before(jiffies, lp->last_irq + HZ)) {
+ media &= ~0x0010;
+ } else {
+ /* Try harder to detect carrier errors */
+ EL3WINDOW(6);
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ errs = inb(ioaddr + 0);
+ outw(StatsEnable, ioaddr + EL3_CMD);
+ dev->stats.tx_carrier_errors += errs;
+ if (errs || (lp->media_status & 0x0010))
+ media |= 0x0010;
+ }
- if (media != lp->media_status) {
- if ((media & lp->media_status & 0x8000) &&
- ((lp->media_status ^ media) & 0x0800))
+ if (media != lp->media_status) {
+ if ((media & lp->media_status & 0x8000) &&
+ ((lp->media_status ^ media) & 0x0800))
netdev_info(dev, "%s link beat\n",
- (lp->media_status & 0x0800 ? "lost" : "found"));
- else if ((media & lp->media_status & 0x4000) &&
+ (lp->media_status & 0x0800 ? "lost" : "found"));
+ else if ((media & lp->media_status & 0x4000) &&
((lp->media_status ^ media) & 0x0010))
netdev_info(dev, "coax cable %s\n",
- (lp->media_status & 0x0010 ? "ok" : "problem"));
- if (dev->if_port == 0) {
- if (media & 0x8000) {
- if (media & 0x0800)
- netdev_info(dev, "flipped to 10baseT\n");
- else
+ (lp->media_status & 0x0010 ? "ok" : "problem"));
+ if (dev->if_port == 0) {
+ if (media & 0x8000) {
+ if (media & 0x0800)
+ netdev_info(dev, "flipped to 10baseT\n");
+ else
tc589_set_xcvr(dev, 2);
- } else if (media & 0x4000) {
- if (media & 0x0010)
- tc589_set_xcvr(dev, 1);
- else
- netdev_info(dev, "flipped to 10base2\n");
- }
+ } else if (media & 0x4000) {
+ if (media & 0x0010)
+ tc589_set_xcvr(dev, 1);
+ else
+ netdev_info(dev, "flipped to 10base2\n");
+ }
+ }
+ lp->media_status = media;
}
- lp->media_status = media;
- }
- EL3WINDOW(1);
- spin_unlock_irqrestore(&lp->lock, flags);
+ EL3WINDOW(1);
+ spin_unlock_irqrestore(&lp->lock, flags);
reschedule:
- lp->media.expires = jiffies + HZ;
- add_timer(&lp->media);
+ lp->media.expires = jiffies + HZ;
+ add_timer(&lp->media);
}
static struct net_device_stats *el3_get_stats(struct net_device *dev)
{
- struct el3_private *lp = netdev_priv(dev);
- unsigned long flags;
- struct pcmcia_device *link = lp->p_dev;
+ struct el3_private *lp = netdev_priv(dev);
+ unsigned long flags;
+ struct pcmcia_device *link = lp->p_dev;
- if (pcmcia_dev_present(link)) {
- spin_lock_irqsave(&lp->lock, flags);
- update_stats(dev);
- spin_unlock_irqrestore(&lp->lock, flags);
- }
- return &dev->stats;
+ if (pcmcia_dev_present(link)) {
+ spin_lock_irqsave(&lp->lock, flags);
+ update_stats(dev);
+ spin_unlock_irqrestore(&lp->lock, flags);
+ }
+ return &dev->stats;
}
-/*
- Update statistics. We change to register window 6, so this should be run
- single-threaded if the device is active. This is expected to be a rare
- operation, and it's simpler for the rest of the driver to assume that
- window 1 is always valid rather than use a special window-state variable.
-
- Caller must hold the lock for this
+/* Update statistics. We change to register window 6, so this should be run
+ * single-threaded if the device is active. This is expected to be a rare
+ * operation, and it's simpler for the rest of the driver to assume that
+ * window 1 is always valid rather than use a special window-state variable.
+ *
+ * Caller must hold the lock for this
*/
+
static void update_stats(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
-
- netdev_dbg(dev, "updating the statistics.\n");
- /* Turn off statistics updates while reading. */
- outw(StatsDisable, ioaddr + EL3_CMD);
- /* Switch to the stats window, and read everything. */
- EL3WINDOW(6);
- dev->stats.tx_carrier_errors += inb(ioaddr + 0);
- dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
- /* Multiple collisions. */ inb(ioaddr + 2);
- dev->stats.collisions += inb(ioaddr + 3);
- dev->stats.tx_window_errors += inb(ioaddr + 4);
- dev->stats.rx_fifo_errors += inb(ioaddr + 5);
- dev->stats.tx_packets += inb(ioaddr + 6);
- /* Rx packets */ inb(ioaddr + 7);
- /* Tx deferrals */ inb(ioaddr + 8);
- /* Rx octets */ inw(ioaddr + 10);
- /* Tx octets */ inw(ioaddr + 12);
-
- /* Back to window 1, and turn statistics back on. */
- EL3WINDOW(1);
- outw(StatsEnable, ioaddr + EL3_CMD);
+ unsigned int ioaddr = dev->base_addr;
+
+ netdev_dbg(dev, "updating the statistics.\n");
+ /* Turn off statistics updates while reading. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switch to the stats window, and read everything. */
+ EL3WINDOW(6);
+ dev->stats.tx_carrier_errors += inb(ioaddr + 0);
+ dev->stats.tx_heartbeat_errors += inb(ioaddr + 1);
+ /* Multiple collisions. */
+ inb(ioaddr + 2);
+ dev->stats.collisions += inb(ioaddr + 3);
+ dev->stats.tx_window_errors += inb(ioaddr + 4);
+ dev->stats.rx_fifo_errors += inb(ioaddr + 5);
+ dev->stats.tx_packets += inb(ioaddr + 6);
+ /* Rx packets */
+ inb(ioaddr + 7);
+ /* Tx deferrals */
+ inb(ioaddr + 8);
+ /* Rx octets */
+ inw(ioaddr + 10);
+ /* Tx octets */
+ inw(ioaddr + 12);
+
+ /* Back to window 1, and turn statistics back on. */
+ EL3WINDOW(1);
+ outw(StatsEnable, ioaddr + EL3_CMD);
}
static int el3_rx(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- int worklimit = 32;
- short rx_status;
+ unsigned int ioaddr = dev->base_addr;
+ int worklimit = 32;
+ short rx_status;
- netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
+ netdev_dbg(dev, "in rx_packet(), status %4.4x, rx_status %4.4x.\n",
inw(ioaddr+EL3_STATUS), inw(ioaddr+RX_STATUS));
- while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
+ while (!((rx_status = inw(ioaddr + RX_STATUS)) & 0x8000) &&
worklimit > 0) {
- worklimit--;
- if (rx_status & 0x4000) { /* Error, update stats. */
- short error = rx_status & 0x3800;
- dev->stats.rx_errors++;
- switch (error) {
- case 0x0000: dev->stats.rx_over_errors++; break;
- case 0x0800: dev->stats.rx_length_errors++; break;
- case 0x1000: dev->stats.rx_frame_errors++; break;
- case 0x1800: dev->stats.rx_length_errors++; break;
- case 0x2000: dev->stats.rx_frame_errors++; break;
- case 0x2800: dev->stats.rx_crc_errors++; break;
- }
- } else {
- short pkt_len = rx_status & 0x7ff;
- struct sk_buff *skb;
-
- skb = netdev_alloc_skb(dev, pkt_len + 5);
-
- netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
+ worklimit--;
+ if (rx_status & 0x4000) { /* Error, update stats. */
+ short error = rx_status & 0x3800;
+ dev->stats.rx_errors++;
+ switch (error) {
+ case 0x0000:
+ dev->stats.rx_over_errors++;
+ break;
+ case 0x0800:
+ dev->stats.rx_length_errors++;
+ break;
+ case 0x1000:
+ dev->stats.rx_frame_errors++;
+ break;
+ case 0x1800:
+ dev->stats.rx_length_errors++;
+ break;
+ case 0x2000:
+ dev->stats.rx_frame_errors++;
+ break;
+ case 0x2800:
+ dev->stats.rx_crc_errors++;
+ break;
+ }
+ } else {
+ short pkt_len = rx_status & 0x7ff;
+ struct sk_buff *skb;
+
+ skb = netdev_alloc_skb(dev, pkt_len + 5);
+
+ netdev_dbg(dev, " Receiving packet size %d status %4.4x.\n",
pkt_len, rx_status);
- if (skb != NULL) {
- skb_reserve(skb, 2);
- insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
+ if (skb != NULL) {
+ skb_reserve(skb, 2);
+ insl(ioaddr+RX_FIFO, skb_put(skb, pkt_len),
(pkt_len+3)>>2);
- skb->protocol = eth_type_trans(skb, dev);
- netif_rx(skb);
- dev->stats.rx_packets++;
- dev->stats.rx_bytes += pkt_len;
- } else {
- netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
+ skb->protocol = eth_type_trans(skb, dev);
+ netif_rx(skb);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += pkt_len;
+ } else {
+ netdev_dbg(dev, "couldn't allocate a sk_buff of size %d.\n",
pkt_len);
- dev->stats.rx_dropped++;
- }
+ dev->stats.rx_dropped++;
+ }
+ }
+ /* Pop the top of the Rx FIFO */
+ tc589_wait_for_completion(dev, RxDiscard);
}
- /* Pop the top of the Rx FIFO */
- tc589_wait_for_completion(dev, RxDiscard);
- }
- if (worklimit == 0)
- netdev_warn(dev, "too much work in el3_rx!\n");
- return 0;
+ if (worklimit == 0)
+ netdev_warn(dev, "too much work in el3_rx!\n");
+ return 0;
}
static void set_rx_mode(struct net_device *dev)
{
- unsigned int ioaddr = dev->base_addr;
- u16 opts = SetRxFilter | RxStation | RxBroadcast;
-
- if (dev->flags & IFF_PROMISC)
- opts |= RxMulticast | RxProm;
- else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
- opts |= RxMulticast;
- outw(opts, ioaddr + EL3_CMD);
+ unsigned int ioaddr = dev->base_addr;
+ u16 opts = SetRxFilter | RxStation | RxBroadcast;
+
+ if (dev->flags & IFF_PROMISC)
+ opts |= RxMulticast | RxProm;
+ else if (!netdev_mc_empty(dev) || (dev->flags & IFF_ALLMULTI))
+ opts |= RxMulticast;
+ outw(opts, ioaddr + EL3_CMD);
}
static void set_multicast_list(struct net_device *dev)
@@ -867,44 +908,44 @@ static void set_multicast_list(struct net_device *dev)
static int el3_close(struct net_device *dev)
{
- struct el3_private *lp = netdev_priv(dev);
- struct pcmcia_device *link = lp->p_dev;
- unsigned int ioaddr = dev->base_addr;
-
- dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+ struct el3_private *lp = netdev_priv(dev);
+ struct pcmcia_device *link = lp->p_dev;
+ unsigned int ioaddr = dev->base_addr;
+
+ dev_dbg(&link->dev, "%s: shutting down ethercard.\n", dev->name);
+
+ if (pcmcia_dev_present(link)) {
+ /* Turn off statistics ASAP. We update dev->stats below. */
+ outw(StatsDisable, ioaddr + EL3_CMD);
+
+ /* Disable the receiver and transmitter. */
+ outw(RxDisable, ioaddr + EL3_CMD);
+ outw(TxDisable, ioaddr + EL3_CMD);
+
+ if (dev->if_port == 2)
+ /* Turn off thinnet power. Green! */
+ outw(StopCoax, ioaddr + EL3_CMD);
+ else if (dev->if_port == 1) {
+ /* Disable link beat and jabber */
+ EL3WINDOW(4);
+ outw(0, ioaddr + WN4_MEDIA);
+ }
- if (pcmcia_dev_present(link)) {
- /* Turn off statistics ASAP. We update dev->stats below. */
- outw(StatsDisable, ioaddr + EL3_CMD);
+ /* Switching back to window 0 disables the IRQ. */
+ EL3WINDOW(0);
+ /* But we explicitly zero the IRQ line select anyway. */
+ outw(0x0f00, ioaddr + WN0_IRQ);
- /* Disable the receiver and transmitter. */
- outw(RxDisable, ioaddr + EL3_CMD);
- outw(TxDisable, ioaddr + EL3_CMD);
-
- if (dev->if_port == 2)
- /* Turn off thinnet power. Green! */
- outw(StopCoax, ioaddr + EL3_CMD);
- else if (dev->if_port == 1) {
- /* Disable link beat and jabber */
- EL3WINDOW(4);
- outw(0, ioaddr + WN4_MEDIA);
+ /* Check if the card still exists */
+ if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
+ update_stats(dev);
}
- /* Switching back to window 0 disables the IRQ. */
- EL3WINDOW(0);
- /* But we explicitly zero the IRQ line select anyway. */
- outw(0x0f00, ioaddr + WN0_IRQ);
-
- /* Check if the card still exists */
- if ((inw(ioaddr+EL3_STATUS) & 0xe000) == 0x2000)
- update_stats(dev);
- }
-
- link->open--;
- netif_stop_queue(dev);
- del_timer_sync(&lp->media);
+ link->open--;
+ netif_stop_queue(dev);
+ del_timer_sync(&lp->media);
- return 0;
+ return 0;
}
static const struct pcmcia_device_id tc589_ids[] = {
diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c
index 9339ccc..2ae00ed 100644
--- a/drivers/net/ethernet/amd/pcnet32.c
+++ b/drivers/net/ethernet/amd/pcnet32.c
@@ -549,35 +549,35 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
struct pcnet32_rx_head *new_rx_ring;
struct sk_buff **new_skb_list;
int new, overlap;
+ unsigned int entries = 1 << size;
new_rx_ring = pci_alloc_consistent(lp->pci_dev,
sizeof(struct pcnet32_rx_head) *
- (1 << size),
+ entries,
&new_ring_dma_addr);
if (new_rx_ring == NULL) {
netif_err(lp, drv, dev, "Consistent memory allocation failed\n");
return;
}
- memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * (1 << size));
+ memset(new_rx_ring, 0, sizeof(struct pcnet32_rx_head) * entries);
- new_dma_addr_list = kcalloc(1 << size, sizeof(dma_addr_t), GFP_ATOMIC);
+ new_dma_addr_list = kcalloc(entries, sizeof(dma_addr_t), GFP_ATOMIC);
if (!new_dma_addr_list)
goto free_new_rx_ring;
- new_skb_list = kcalloc(1 << size, sizeof(struct sk_buff *),
- GFP_ATOMIC);
+ new_skb_list = kcalloc(entries, sizeof(struct sk_buff *), GFP_ATOMIC);
if (!new_skb_list)
goto free_new_lists;
/* first copy the current receive buffers */
- overlap = min(size, lp->rx_ring_size);
+ overlap = min(entries, lp->rx_ring_size);
for (new = 0; new < overlap; new++) {
new_rx_ring[new] = lp->rx_ring[new];
new_dma_addr_list[new] = lp->rx_dma_addr[new];
new_skb_list[new] = lp->rx_skbuff[new];
}
/* now allocate any new buffers needed */
- for (; new < size; new++) {
+ for (; new < entries; new++) {
struct sk_buff *rx_skbuff;
new_skb_list[new] = netdev_alloc_skb(dev, PKT_BUF_SKB);
rx_skbuff = new_skb_list[new];
@@ -592,6 +592,13 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
new_dma_addr_list[new] =
pci_map_single(lp->pci_dev, rx_skbuff->data,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev,
+ new_dma_addr_list[new])) {
+ netif_err(lp, drv, dev, "%s dma mapping failed\n",
+ __func__);
+ dev_kfree_skb(new_skb_list[new]);
+ goto free_all_new;
+ }
new_rx_ring[new].base = cpu_to_le32(new_dma_addr_list[new]);
new_rx_ring[new].buf_length = cpu_to_le16(NEG_BUF_SIZE);
new_rx_ring[new].status = cpu_to_le16(0x8000);
@@ -599,8 +606,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
/* and free any unneeded buffers */
for (; new < lp->rx_ring_size; new++) {
if (lp->rx_skbuff[new]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[new],
- PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[new]))
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[new],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb(lp->rx_skbuff[new]);
}
}
@@ -612,7 +623,7 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
lp->rx_ring_size, lp->rx_ring,
lp->rx_ring_dma_addr);
- lp->rx_ring_size = (1 << size);
+ lp->rx_ring_size = entries;
lp->rx_mod_mask = lp->rx_ring_size - 1;
lp->rx_len_bits = (size << 4);
lp->rx_ring = new_rx_ring;
@@ -624,8 +635,12 @@ static void pcnet32_realloc_rx_ring(struct net_device *dev,
free_all_new:
while (--new >= lp->rx_ring_size) {
if (new_skb_list[new]) {
- pci_unmap_single(lp->pci_dev, new_dma_addr_list[new],
- PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ new_dma_addr_list[new]))
+ pci_unmap_single(lp->pci_dev,
+ new_dma_addr_list[new],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb(new_skb_list[new]);
}
}
@@ -634,8 +649,7 @@ free_new_lists:
kfree(new_dma_addr_list);
free_new_rx_ring:
pci_free_consistent(lp->pci_dev,
- sizeof(struct pcnet32_rx_head) *
- (1 << size),
+ sizeof(struct pcnet32_rx_head) * entries,
new_rx_ring,
new_ring_dma_addr);
}
@@ -650,8 +664,12 @@ static void pcnet32_purge_rx_ring(struct net_device *dev)
lp->rx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */
if (lp->rx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->rx_dma_addr[i],
- PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[i]))
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[i],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(lp->rx_skbuff[i]);
}
lp->rx_skbuff[i] = NULL;
@@ -930,6 +948,12 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
lp->tx_dma_addr[x] =
pci_map_single(lp->pci_dev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[x])) {
+ netif_printk(lp, hw, KERN_DEBUG, dev,
+ "DMA mapping error at line: %d!\n",
+ __LINE__);
+ goto clean_up;
+ }
lp->tx_ring[x].base = cpu_to_le32(lp->tx_dma_addr[x]);
wmb(); /* Make sure owner changes after all others are visible */
lp->tx_ring[x].status = cpu_to_le16(status);
@@ -1142,24 +1166,36 @@ static void pcnet32_rx_entry(struct net_device *dev,
if (pkt_len > rx_copybreak) {
struct sk_buff *newskb;
+ dma_addr_t new_dma_addr;
newskb = netdev_alloc_skb(dev, PKT_BUF_SKB);
+ /*
+ * map the new buffer, if mapping fails, drop the packet and
+ * reuse the old buffer
+ */
if (newskb) {
skb_reserve(newskb, NET_IP_ALIGN);
- skb = lp->rx_skbuff[entry];
- pci_unmap_single(lp->pci_dev,
- lp->rx_dma_addr[entry],
- PKT_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
- skb_put(skb, pkt_len);
- lp->rx_skbuff[entry] = newskb;
- lp->rx_dma_addr[entry] =
- pci_map_single(lp->pci_dev,
- newskb->data,
- PKT_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
- rxp->base = cpu_to_le32(lp->rx_dma_addr[entry]);
- rx_in_place = 1;
+ new_dma_addr = pci_map_single(lp->pci_dev,
+ newskb->data,
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, new_dma_addr)) {
+ netif_err(lp, rx_err, dev,
+ "DMA mapping error.\n");
+ dev_kfree_skb(newskb);
+ skb = NULL;
+ } else {
+ skb = lp->rx_skbuff[entry];
+ pci_unmap_single(lp->pci_dev,
+ lp->rx_dma_addr[entry],
+ PKT_BUF_SIZE,
+ PCI_DMA_FROMDEVICE);
+ skb_put(skb, pkt_len);
+ lp->rx_skbuff[entry] = newskb;
+ lp->rx_dma_addr[entry] = new_dma_addr;
+ rxp->base = cpu_to_le32(new_dma_addr);
+ rx_in_place = 1;
+ }
} else
skb = NULL;
} else
@@ -2229,9 +2265,12 @@ static void pcnet32_purge_tx_ring(struct net_device *dev)
lp->tx_ring[i].status = 0; /* CPU owns buffer */
wmb(); /* Make sure adapter sees owner change */
if (lp->tx_skbuff[i]) {
- pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i],
- lp->tx_skbuff[i]->len,
- PCI_DMA_TODEVICE);
+ if (!pci_dma_mapping_error(lp->pci_dev,
+ lp->tx_dma_addr[i]))
+ pci_unmap_single(lp->pci_dev,
+ lp->tx_dma_addr[i],
+ lp->tx_skbuff[i]->len,
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(lp->tx_skbuff[i]);
}
lp->tx_skbuff[i] = NULL;
@@ -2264,10 +2303,19 @@ static int pcnet32_init_ring(struct net_device *dev)
}
rmb();
- if (lp->rx_dma_addr[i] == 0)
+ if (lp->rx_dma_addr[i] == 0) {
lp->rx_dma_addr[i] =
pci_map_single(lp->pci_dev, rx_skbuff->data,
PKT_BUF_SIZE, PCI_DMA_FROMDEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev,
+ lp->rx_dma_addr[i])) {
+ /* there is not much we can do at this point */
+ netif_err(lp, drv, dev,
+ "%s pci dma mapping error\n",
+ __func__);
+ return -1;
+ }
+ }
lp->rx_ring[i].base = cpu_to_le32(lp->rx_dma_addr[i]);
lp->rx_ring[i].buf_length = cpu_to_le16(NEG_BUF_SIZE);
wmb(); /* Make sure owner changes after all others are visible */
@@ -2397,9 +2445,14 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
lp->tx_ring[entry].misc = 0x00000000;
- lp->tx_skbuff[entry] = skb;
lp->tx_dma_addr[entry] =
pci_map_single(lp->pci_dev, skb->data, skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(lp->pci_dev, lp->tx_dma_addr[entry])) {
+ dev_kfree_skb(skb);
+ dev->stats.tx_dropped++;
+ goto drop_packet;
+ }
+ lp->tx_skbuff[entry] = skb;
lp->tx_ring[entry].base = cpu_to_le32(lp->tx_dma_addr[entry]);
wmb(); /* Make sure owner changes after all others are visible */
lp->tx_ring[entry].status = cpu_to_le16(status);
@@ -2414,6 +2467,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
lp->tx_full = 1;
netif_stop_queue(dev);
}
+drop_packet:
spin_unlock_irqrestore(&lp->lock, flags);
return NETDEV_TX_OK;
}
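
Every mapping site touched above follows the same shape: check pci_dma_mapping_error() right after pci_map_single(), and only unmap addresses that mapped successfully. A hypothetical helper (the name and exact error handling are illustrative, not part of the patch) showing that pattern for the TX path:

	static int pcnet32_map_tx_skb(struct pcnet32_private *lp, struct sk_buff *skb,
				      dma_addr_t *dma)
	{
		*dma = pci_map_single(lp->pci_dev, skb->data, skb->len,
				      PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(lp->pci_dev, *dma))
			return -ENOMEM;	/* caller drops the skb and bumps tx_dropped */
		return 0;		/* *dma is safe to hand to the descriptor ring */
	}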
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index 3f97d9f..85dbddd 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -60,6 +60,17 @@ config BCM63XX_ENET
This driver supports the ethernet MACs in the Broadcom 63xx
MIPS chipset family (BCM63XX).
+config BCMGENET
+ tristate "Broadcom GENET internal MAC support"
+ depends on OF
+ select MII
+ select PHYLIB
+ select FIXED_PHY if BCMGENET=y
+ select BCM7XXX_PHY
+ help
+ This driver supports the built-in Ethernet MACs found in the
+ Broadcom BCM7xxx Set Top Box family chipset.
+
config BNX2
tristate "Broadcom NetXtremeII support"
depends on PCI
diff --git a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile
index 68efa1a..fd639a0 100644
--- a/drivers/net/ethernet/broadcom/Makefile
+++ b/drivers/net/ethernet/broadcom/Makefile
@@ -4,6 +4,7 @@
obj-$(CONFIG_B44) += b44.o
obj-$(CONFIG_BCM63XX_ENET) += bcm63xx_enet.o
+obj-$(CONFIG_BCMGENET) += genet/
obj-$(CONFIG_BNX2) += bnx2.o
obj-$(CONFIG_CNIC) += cnic.o
obj-$(CONFIG_BNX2X) += bnx2x/
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index cda25ac..ca6b362 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -6206,7 +6206,7 @@ bnx2_free_irq(struct bnx2 *bp)
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
- int i, total_vecs, rc;
+ int i, total_vecs;
struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
struct net_device *dev = bp->dev;
const int len = sizeof(bp->irq_tbl[0].name);
@@ -6229,16 +6229,9 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
#ifdef BCM_CNIC
total_vecs++;
#endif
- rc = -ENOSPC;
- while (total_vecs >= BNX2_MIN_MSIX_VEC) {
- rc = pci_enable_msix(bp->pdev, msix_ent, total_vecs);
- if (rc <= 0)
- break;
- if (rc > 0)
- total_vecs = rc;
- }
-
- if (rc != 0)
+ total_vecs = pci_enable_msix_range(bp->pdev, msix_ent,
+ BNX2_MIN_MSIX_VEC, total_vecs);
+ if (total_vecs < 0)
return;
msix_vecs = total_vecs;
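The old code looped, retrying pci_enable_msix() with progressively smaller counts; pci_enable_msix_range() folds that negotiation into one call, returning the number of vectors actually enabled (between the requested minimum and maximum) or a negative errno. A rough caller sketch, with the bounds and names chosen only for illustration:

	#include <linux/pci.h>

	/* Illustrative only: entries[] and the minimum of 2 are made up. */
	static int example_enable_msix(struct pci_dev *pdev,
				       struct msix_entry *entries, int want)
	{
		int got = pci_enable_msix_range(pdev, entries, 2 /* min */, want);

		if (got < 0)
			return got;	/* not even the minimum was available */
		return got;		/* 2 <= got <= want vectors are enabled */
	}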
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 391f29e..7221609 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -26,8 +26,8 @@
* (you will need to reboot afterwards) */
/* #define BNX2X_STOP_ON_ERROR */
-#define DRV_MODULE_VERSION "1.78.17-0"
-#define DRV_MODULE_RELDATE "2013/04/11"
+#define DRV_MODULE_VERSION "1.78.19-0"
+#define DRV_MODULE_RELDATE "2014/02/10"
#define BNX2X_BC_VER 0x040200
#if defined(CONFIG_DCB)
@@ -75,13 +75,22 @@ enum bnx2x_int_mode {
#define BNX2X_MSG_DCB 0x8000000
/* regular debug print */
+#define DP_INNER(fmt, ...) \
+ pr_notice("[%s:%d(%s)]" fmt, \
+ __func__, __LINE__, \
+ bp->dev ? (bp->dev->name) : "?", \
+ ##__VA_ARGS__);
+
#define DP(__mask, fmt, ...) \
do { \
if (unlikely(bp->msg_enable & (__mask))) \
- pr_notice("[%s:%d(%s)]" fmt, \
- __func__, __LINE__, \
- bp->dev ? (bp->dev->name) : "?", \
- ##__VA_ARGS__); \
+ DP_INNER(fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define DP_AND(__mask, fmt, ...) \
+do { \
+ if (unlikely((bp->msg_enable & (__mask)) == __mask)) \
+ DP_INNER(fmt, ##__VA_ARGS__); \
} while (0)
#define DP_CONT(__mask, fmt, ...) \
@@ -1261,6 +1270,7 @@ struct bnx2x_slowpath {
union {
struct client_init_ramrod_data init_data;
struct client_update_ramrod_data update_data;
+ struct tpa_update_ramrod_data tpa_data;
} q_rdata;
union {
@@ -1392,7 +1402,7 @@ struct bnx2x_fw_stats_data {
};
/* Public slow path states */
-enum {
+enum sp_rtnl_flag {
BNX2X_SP_RTNL_SETUP_TC,
BNX2X_SP_RTNL_TX_TIMEOUT,
BNX2X_SP_RTNL_FAN_FAILURE,
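The only difference between DP() and the new DP_AND() introduced above is the mask test: DP() prints when any bit of __mask is enabled in bp->msg_enable, DP_AND() only when every bit is. The distinction can be shown with an ordinary user-space C fragment (not driver code):

	#include <stdio.h>

	#define MSG_SP    0x1u
	#define MSG_STATS 0x2u

	int main(void)
	{
		unsigned int msg_enable = MSG_SP;	/* only SP logging enabled */
		unsigned int mask = MSG_SP | MSG_STATS;

		if (msg_enable & mask)			/* DP(): any bit matches */
			printf("DP would print\n");
		if ((msg_enable & mask) == mask)	/* DP_AND(): all bits must match */
			printf("DP_AND would print\n");	/* not reached in this example */
		return 0;
	}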
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index dbcff50..117b5c7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -61,10 +61,14 @@ static void bnx2x_add_all_napi(struct bnx2x *bp)
static int bnx2x_calc_num_queues(struct bnx2x *bp)
{
- return bnx2x_num_queues ?
- min_t(int, bnx2x_num_queues, BNX2X_MAX_QUEUES(bp)) :
- min_t(int, netif_get_num_default_rss_queues(),
- BNX2X_MAX_QUEUES(bp));
+ int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
+
+ /* Reduce memory usage in kdump environment by using only one queue */
+ if (reset_devices)
+ nq = 1;
+
+ nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
+ return nq;
}
/**
@@ -1638,36 +1642,16 @@ int bnx2x_enable_msix(struct bnx2x *bp)
DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
msix_vec);
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], msix_vec);
-
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
+ BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
/*
* reconfigure number of tx/rx queues according to available
* MSI-X vectors
*/
- if (rc >= BNX2X_MIN_MSIX_VEC_CNT(bp)) {
- /* how less vectors we will have? */
- int diff = msix_vec - rc;
-
- BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
-
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
-
- if (rc) {
- BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
- goto no_msix;
- }
- /*
- * decrease number of queues by number of unallocated entries
- */
- bp->num_ethernet_queues -= diff;
- bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
-
- BNX2X_DEV_INFO("New queue configuration set: %d\n",
- bp->num_queues);
- } else if (rc > 0) {
+ if (rc == -ENOSPC) {
/* Get by with single vector */
- rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], 1);
- if (rc) {
+ rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
+ if (rc < 0) {
BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
rc);
goto no_msix;
@@ -1680,8 +1664,22 @@ int bnx2x_enable_msix(struct bnx2x *bp)
bp->num_ethernet_queues = 1;
bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
} else if (rc < 0) {
- BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
+ BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
goto no_msix;
+ } else if (rc < msix_vec) {
+		/* how many fewer vectors will we have? */
+ int diff = msix_vec - rc;
+
+ BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
+
+ /*
+ * decrease number of queues by number of unallocated entries
+ */
+ bp->num_ethernet_queues -= diff;
+ bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
+
+ BNX2X_DEV_INFO("New queue configuration set: %d\n",
+ bp->num_queues);
}
bp->flags |= USING_MSIX_FLAG;
@@ -2234,8 +2232,10 @@ static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
sizeof(struct per_queue_stats) * num_queue_stats +
sizeof(struct stats_counter);
- BNX2X_PCI_ALLOC(bp->fw_stats, &bp->fw_stats_mapping,
- bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
+ bp->fw_stats_data_sz + bp->fw_stats_req_sz);
+ if (!bp->fw_stats)
+ goto alloc_mem_err;
/* Set shortcuts */
bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
@@ -4370,14 +4370,17 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
if (!IS_FCOE_IDX(index)) {
/* status blocks */
- if (!CHIP_IS_E1x(bp))
- BNX2X_PCI_ALLOC(sb->e2_sb,
- &bnx2x_fp(bp, index, status_blk_mapping),
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_ALLOC(sb->e1x_sb,
- &bnx2x_fp(bp, index, status_blk_mapping),
- sizeof(struct host_hc_status_block_e1x));
+ if (!CHIP_IS_E1x(bp)) {
+ sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e2));
+ if (!sb->e2_sb)
+ goto alloc_mem_err;
+ } else {
+ sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
+ sizeof(struct host_hc_status_block_e1x));
+ if (!sb->e1x_sb)
+ goto alloc_mem_err;
+ }
}
/* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
@@ -4396,35 +4399,49 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
"allocating tx memory of fp %d cos %d\n",
index, cos);
- BNX2X_ALLOC(txdata->tx_buf_ring,
- sizeof(struct sw_tx_bd) * NUM_TX_BD);
- BNX2X_PCI_ALLOC(txdata->tx_desc_ring,
- &txdata->tx_desc_mapping,
- sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
+ sizeof(struct sw_tx_bd),
+ GFP_KERNEL);
+ if (!txdata->tx_buf_ring)
+ goto alloc_mem_err;
+ txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
+ sizeof(union eth_tx_bd_types) * NUM_TX_BD);
+ if (!txdata->tx_desc_ring)
+ goto alloc_mem_err;
}
}
/* Rx */
if (!skip_rx_queue(bp, index)) {
/* fastpath rx rings: rx_buf rx_desc rx_comp */
- BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
- sizeof(struct sw_rx_bd) * NUM_RX_BD);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
- &bnx2x_fp(bp, index, rx_desc_mapping),
- sizeof(struct eth_rx_bd) * NUM_RX_BD);
+ bnx2x_fp(bp, index, rx_buf_ring) =
+ kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
+ if (!bnx2x_fp(bp, index, rx_buf_ring))
+ goto alloc_mem_err;
+ bnx2x_fp(bp, index, rx_desc_ring) =
+ BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
+ sizeof(struct eth_rx_bd) * NUM_RX_BD);
+ if (!bnx2x_fp(bp, index, rx_desc_ring))
+ goto alloc_mem_err;
/* Seed all CQEs by 1s */
- BNX2X_PCI_FALLOC(bnx2x_fp(bp, index, rx_comp_ring),
- &bnx2x_fp(bp, index, rx_comp_mapping),
- sizeof(struct eth_fast_path_rx_cqe) *
- NUM_RCQ_BD);
+ bnx2x_fp(bp, index, rx_comp_ring) =
+ BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
+ sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
+ if (!bnx2x_fp(bp, index, rx_comp_ring))
+ goto alloc_mem_err;
/* SGE ring */
- BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
- sizeof(struct sw_rx_page) * NUM_RX_SGE);
- BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
- &bnx2x_fp(bp, index, rx_sge_mapping),
- BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ bnx2x_fp(bp, index, rx_page_ring) =
+ kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
+ GFP_KERNEL);
+ if (!bnx2x_fp(bp, index, rx_page_ring))
+ goto alloc_mem_err;
+ bnx2x_fp(bp, index, rx_sge_ring) =
+ BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
+ BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
+ if (!bnx2x_fp(bp, index, rx_sge_ring))
+ goto alloc_mem_err;
/* RX BD ring */
bnx2x_set_next_page_rx_bd(fp);
@@ -4780,12 +4797,8 @@ void bnx2x_tx_timeout(struct net_device *dev)
bnx2x_panic();
#endif
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
-
/* This allows the netif to be shutdown gracefully before resetting */
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
@@ -4913,3 +4926,15 @@ void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
disable = disable ? 1 : (usec ? 0 : 1);
storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
+
+void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
+ u32 verbose)
+{
+ smp_mb__before_clear_bit();
+ set_bit(flag, &bp->sp_rtnl_state);
+ smp_mb__after_clear_bit();
+ DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
+ flag);
+ schedule_delayed_work(&bp->sp_rtnl_task, 0);
+}
+EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);
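bnx2x_schedule_sp_rtnl() centralizes the set-bit-then-kick-the-worker sequence that the rest of this series strips out of its call sites. The essence of the pattern, as a reduced sketch rather than the driver function itself, using the pre-3.16 smp_mb__{before,after}_clear_bit() names that this code base still relies on:

	#include <linux/bitops.h>
	#include <linux/workqueue.h>

	/* Reduced sketch of the flag-publish-and-schedule pattern. */
	static void example_schedule_flag(unsigned long *state, int flag,
					  struct delayed_work *task)
	{
		smp_mb__before_clear_bit();	/* order prior stores before the flag */
		set_bit(flag, state);
		smp_mb__after_clear_bit();	/* publish the flag before waking the worker */
		schedule_delayed_work(task, 0);
	}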
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index a89a40f..05f4f5f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -47,31 +47,26 @@ extern int bnx2x_num_queues;
} \
} while (0)
-#define BNX2X_PCI_ALLOC(x, y, size) \
- do { \
- x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- DP(NETIF_MSG_HW, "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
- (unsigned long long)(*y), x); \
- } while (0)
-
-#define BNX2X_PCI_FALLOC(x, y, size) \
- do { \
- x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- memset((void *)x, 0xFFFFFFFF, size); \
- DP(NETIF_MSG_HW, "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n",\
- (unsigned long long)(*y), x); \
- } while (0)
-
-#define BNX2X_ALLOC(x, size) \
- do { \
- x = kzalloc(size, GFP_KERNEL); \
- if (x == NULL) \
- goto alloc_mem_err; \
- } while (0)
+#define BNX2X_PCI_ALLOC(y, size) \
+({ \
+ void *x = dma_zalloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) \
+ DP(NETIF_MSG_HW, \
+ "BNX2X_PCI_ALLOC: Physical %Lx Virtual %p\n", \
+ (unsigned long long)(*y), x); \
+ x; \
+})
+#define BNX2X_PCI_FALLOC(y, size) \
+({ \
+ void *x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
+ if (x) { \
+ memset(x, 0xff, size); \
+ DP(NETIF_MSG_HW, \
+ "BNX2X_PCI_FALLOC: Physical %Lx Virtual %p\n", \
+ (unsigned long long)(*y), x); \
+ } \
+ x; \
+})
/*********************** Interfaces ****************************
* Functions that need to be implemented by each driver version
@@ -1324,4 +1319,7 @@ void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len);
int bnx2x_drain_tx_queues(struct bnx2x *bp);
void bnx2x_squeeze_objects(struct bnx2x *bp);
+void bnx2x_schedule_sp_rtnl(struct bnx2x*, enum sp_rtnl_flag,
+ u32 verbose);
+
#endif /* BNX2X_CMN_H */
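The rewritten BNX2X_PCI_ALLOC()/BNX2X_PCI_FALLOC() are GCC statement expressions: the ({ ... }) block evaluates to its last expression, so the macro hands the pointer (or NULL) back to the caller instead of hiding a goto alloc_mem_err. A tiny stand-alone illustration of the construct (user-space C, names invented for the example; requires the GCC/clang extension):

	#include <stdio.h>
	#include <stdlib.h>

	/* The block evaluates to its last expression (p), as BNX2X_PCI_ALLOC does. */
	#define ZALLOC_OR_NULL(sz)					\
	({								\
		void *p = calloc(1, (sz));				\
		if (p)							\
			printf("allocated %zu bytes\n", (size_t)(sz));	\
		p;							\
	})

	int main(void)
	{
		char *buf = ZALLOC_OR_NULL(64);

		if (!buf)	/* the caller, not the macro, decides how to fail */
			return 1;
		free(buf);
		return 0;
	}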
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
index fdace20..97ea542 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
@@ -710,8 +710,7 @@ static inline void bnx2x_dcbx_update_tc_mapping(struct bnx2x *bp)
* as we are handling an attention on a work queue which must be
* flushed at some rtnl-locked contexts (e.g. if down)
*/
- if (!test_and_set_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_SETUP_TC, 0);
}
void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
@@ -764,10 +763,7 @@ void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
if (IS_MF(bp))
bnx2x_link_sync_notify(bp);
- set_bit(BNX2X_SP_RTNL_TX_STOP, &bp->sp_rtnl_state);
-
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
-
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_STOP, 0);
return;
}
case BNX2X_DCBX_STATE_TX_PAUSED:
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
index 84aecdf..95dc365 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_fw_defs.h
@@ -87,7 +87,6 @@
(IRO[156].base + ((vfId) * IRO[156].m1))
#define CSTORM_VF_TO_PF_OFFSET(funcId) \
(IRO[150].base + ((funcId) * IRO[150].m1))
-#define TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET (IRO[204].base)
#define TSTORM_APPROXIMATE_MATCH_MULTICAST_FILTERING_OFFSET(pfId) \
(IRO[203].base + ((pfId) * IRO[203].m1))
#define TSTORM_ASSERT_LIST_INDEX_OFFSET (IRO[102].base)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
index cf1df8b..46e2f18 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h
@@ -2848,7 +2848,7 @@ struct afex_stats {
#define BCM_5710_FW_MAJOR_VERSION 7
#define BCM_5710_FW_MINOR_VERSION 8
-#define BCM_5710_FW_REVISION_VERSION 17
+#define BCM_5710_FW_REVISION_VERSION 19
#define BCM_5710_FW_ENGINEERING_VERSION 0
#define BCM_5710_FW_COMPILE_FLAGS 1
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 7d43822..5e74599 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -918,7 +918,7 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
u16 start = 0, end = 0;
u8 cos;
#endif
- if (disable_int)
+ if (IS_PF(bp) && disable_int)
bnx2x_int_disable(bp);
bp->stats_state = STATS_STATE_DISABLED;
@@ -929,33 +929,41 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
/* Indices */
/* Common */
- BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
- bp->def_idx, bp->def_att_idx, bp->attn_state,
- bp->spq_prod_idx, bp->stats_counter);
- BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
- bp->def_status_blk->atten_status_block.attn_bits,
- bp->def_status_blk->atten_status_block.attn_bits_ack,
- bp->def_status_blk->atten_status_block.status_block_id,
- bp->def_status_blk->atten_status_block.attn_bits_index);
- BNX2X_ERR(" def (");
- for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
- pr_cont("0x%x%s",
- bp->def_status_blk->sp_sb.index_values[i],
- (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
-
- for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
- *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
- CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
- i*sizeof(u32));
-
- pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
- sp_sb_data.igu_sb_id,
- sp_sb_data.igu_seg_id,
- sp_sb_data.p_func.pf_id,
- sp_sb_data.p_func.vnic_id,
- sp_sb_data.p_func.vf_id,
- sp_sb_data.p_func.vf_valid,
- sp_sb_data.state);
+ if (IS_PF(bp)) {
+ struct host_sp_status_block *def_sb = bp->def_status_blk;
+ int data_size, cstorm_offset;
+
+ BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
+ bp->def_idx, bp->def_att_idx, bp->attn_state,
+ bp->spq_prod_idx, bp->stats_counter);
+ BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
+ def_sb->atten_status_block.attn_bits,
+ def_sb->atten_status_block.attn_bits_ack,
+ def_sb->atten_status_block.status_block_id,
+ def_sb->atten_status_block.attn_bits_index);
+ BNX2X_ERR(" def (");
+ for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
+ pr_cont("0x%x%s",
+ def_sb->sp_sb.index_values[i],
+ (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
+
+ data_size = sizeof(struct hc_sp_status_block_data) /
+ sizeof(u32);
+ cstorm_offset = CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func);
+ for (i = 0; i < data_size; i++)
+ *((u32 *)&sp_sb_data + i) =
+ REG_RD(bp, BAR_CSTRORM_INTMEM + cstorm_offset +
+ i * sizeof(u32));
+
+ pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
+ sp_sb_data.igu_sb_id,
+ sp_sb_data.igu_seg_id,
+ sp_sb_data.p_func.pf_id,
+ sp_sb_data.p_func.vnic_id,
+ sp_sb_data.p_func.vf_id,
+ sp_sb_data.p_func.vf_valid,
+ sp_sb_data.state);
+ }
for_each_eth_queue(bp, i) {
struct bnx2x_fastpath *fp = &bp->fp[i];
@@ -1013,6 +1021,11 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
pr_cont("0x%x%s",
fp->sb_index_values[j],
(j == loop - 1) ? ")" : " ");
+
+		/* VF cannot access the FW reflection of the status block */
+ if (IS_VF(bp))
+ continue;
+
/* fw sb data */
data_size = CHIP_IS_E1x(bp) ?
sizeof(struct hc_status_block_data_e1x) :
@@ -1064,16 +1077,18 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
}
#ifdef BNX2X_STOP_ON_ERROR
-
- /* event queue */
- BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
- for (i = 0; i < NUM_EQ_DESC; i++) {
- u32 *data = (u32 *)&bp->eq_ring[i].message.data;
-
- BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
- i, bp->eq_ring[i].message.opcode,
- bp->eq_ring[i].message.error);
- BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
+ if (IS_PF(bp)) {
+ /* event queue */
+ BNX2X_ERR("eq cons %x prod %x\n", bp->eq_cons, bp->eq_prod);
+ for (i = 0; i < NUM_EQ_DESC; i++) {
+ u32 *data = (u32 *)&bp->eq_ring[i].message.data;
+
+ BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
+ i, bp->eq_ring[i].message.opcode,
+ bp->eq_ring[i].message.error);
+ BNX2X_ERR("data: %x %x %x\n",
+ data[0], data[1], data[2]);
+ }
}
/* Rings */
@@ -1140,8 +1155,10 @@ void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
}
}
#endif
- bnx2x_fw_dump(bp);
- bnx2x_mc_assert(bp);
+ if (IS_PF(bp)) {
+ bnx2x_fw_dump(bp);
+ bnx2x_mc_assert(bp);
+ }
BNX2X_ERR("end crash dump -----------------\n");
}
@@ -1814,6 +1831,11 @@ void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
drv_cmd = BNX2X_Q_CMD_EMPTY;
break;
+ case (RAMROD_CMD_ID_ETH_TPA_UPDATE):
+ DP(BNX2X_MSG_SP, "got tpa update ramrod CID=%d\n", cid);
+ drv_cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ break;
+
default:
BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
command, fp->index);
@@ -3644,10 +3666,18 @@ int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
HW_CID(bp, cid));
- type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
-
- type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
- SPE_HDR_FUNCTION_ID);
+	/* In some cases, type may already contain the func-id
+	 * (mainly in SRIOV-related use cases), so we only add it
+	 * here if it's not already set.
+	 */
+ if (!(cmd_type & SPE_HDR_FUNCTION_ID)) {
+ type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) &
+ SPE_HDR_CONN_TYPE;
+ type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
+ SPE_HDR_FUNCTION_ID);
+ } else {
+ type = cmd_type;
+ }
spe->hdr.type = cpu_to_le16(type);
@@ -3878,10 +3908,7 @@ static void bnx2x_fan_failure(struct bnx2x *bp)
* This is due to some boards consuming sufficient power when driver is
* up to overheat if fan fails.
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_FAN_FAILURE, 0);
}
static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
@@ -5221,9 +5248,9 @@ static void bnx2x_eq_int(struct bnx2x *bp)
continue;
case EVENT_RING_OPCODE_STAT_QUERY:
- DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
- "got statistics comp event %d\n",
- bp->stats_comp++);
+ DP_AND((BNX2X_MSG_SP | BNX2X_MSG_STATS),
+ "got statistics comp event %d\n",
+ bp->stats_comp++);
/* nothing to do with stats comp */
goto next_spqe;
@@ -5273,6 +5300,8 @@ static void bnx2x_eq_int(struct bnx2x *bp)
break;
} else {
+ int cmd = BNX2X_SP_RTNL_AFEX_F_UPDATE;
+
DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
"AFEX: ramrod completed FUNCTION_UPDATE\n");
f_obj->complete_cmd(bp, f_obj,
@@ -5282,12 +5311,7 @@ static void bnx2x_eq_int(struct bnx2x *bp)
* sp_rtnl task as all Queue SP operations
* should run under rtnl_lock.
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
-
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, cmd, 0);
}
goto next_spqe;
@@ -6005,18 +6029,6 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
{
int i;
- if (IS_MF_SI(bp))
- /*
- * In switch independent mode, the TSTORM needs to accept
- * packets that failed classification, since approximate match
- * mac addresses aren't written to NIG LLH
- */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
- else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
- REG_WR8(bp, BAR_TSTRORM_INTMEM +
- TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
-
/* Zero this manually as its initialization is
currently missing in the initTool */
for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -7989,19 +8001,25 @@ void bnx2x_free_mem(struct bnx2x *bp)
int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
{
- if (!CHIP_IS_E1x(bp))
+ if (!CHIP_IS_E1x(bp)) {
/* size = the status block + ramrod buffers */
- BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
- sizeof(struct host_hc_status_block_e2));
- else
- BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
- &bp->cnic_sb_mapping,
- sizeof(struct
- host_hc_status_block_e1x));
+ bp->cnic_sb.e2_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e2));
+ if (!bp->cnic_sb.e2_sb)
+ goto alloc_mem_err;
+ } else {
+ bp->cnic_sb.e1x_sb = BNX2X_PCI_ALLOC(&bp->cnic_sb_mapping,
+ sizeof(struct host_hc_status_block_e1x));
+ if (!bp->cnic_sb.e1x_sb)
+ goto alloc_mem_err;
+ }
- if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
+ if (CONFIGURE_NIC_MODE(bp) && !bp->t2) {
/* allocate searcher T2 table, as it wasn't allocated before */
- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+ bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+ if (!bp->t2)
+ goto alloc_mem_err;
+ }
/* write address to which L5 should insert its values */
bp->cnic_eth_dev.addr_drv_info_to_mcp =
@@ -8022,15 +8040,22 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
{
int i, allocated, context_size;
- if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
+ if (!CONFIGURE_NIC_MODE(bp) && !bp->t2) {
/* allocate searcher T2 table */
- BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
+ bp->t2 = BNX2X_PCI_ALLOC(&bp->t2_mapping, SRC_T2_SZ);
+ if (!bp->t2)
+ goto alloc_mem_err;
+ }
- BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
- sizeof(struct host_sp_status_block));
+ bp->def_status_blk = BNX2X_PCI_ALLOC(&bp->def_status_blk_mapping,
+ sizeof(struct host_sp_status_block));
+ if (!bp->def_status_blk)
+ goto alloc_mem_err;
- BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
- sizeof(struct bnx2x_slowpath));
+ bp->slowpath = BNX2X_PCI_ALLOC(&bp->slowpath_mapping,
+ sizeof(struct bnx2x_slowpath));
+ if (!bp->slowpath)
+ goto alloc_mem_err;
/* Allocate memory for CDU context:
* This memory is allocated separately and not in the generic ILT
@@ -8050,12 +8075,16 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
for (i = 0, allocated = 0; allocated < context_size; i++) {
bp->context[i].size = min(CDU_ILT_PAGE_SZ,
(context_size - allocated));
- BNX2X_PCI_ALLOC(bp->context[i].vcxt,
- &bp->context[i].cxt_mapping,
- bp->context[i].size);
+ bp->context[i].vcxt = BNX2X_PCI_ALLOC(&bp->context[i].cxt_mapping,
+ bp->context[i].size);
+ if (!bp->context[i].vcxt)
+ goto alloc_mem_err;
allocated += bp->context[i].size;
}
- BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
+ bp->ilt->lines = kcalloc(ILT_MAX_LINES, sizeof(struct ilt_line),
+ GFP_KERNEL);
+ if (!bp->ilt->lines)
+ goto alloc_mem_err;
if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
goto alloc_mem_err;
@@ -8064,11 +8093,15 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
goto alloc_mem_err;
/* Slow path ring */
- BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
+ bp->spq = BNX2X_PCI_ALLOC(&bp->spq_mapping, BCM_PAGE_SIZE);
+ if (!bp->spq)
+ goto alloc_mem_err;
/* EQ */
- BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
- BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ bp->eq_ring = BNX2X_PCI_ALLOC(&bp->eq_mapping,
+ BCM_PAGE_SIZE * NUM_EQ_PAGES);
+ if (!bp->eq_ring)
+ goto alloc_mem_err;
return 0;
@@ -11771,6 +11804,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
bp->disable_tpa = disable_tpa;
bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
+ /* Reduce memory usage in kdump environment by disabling TPA */
+ bp->disable_tpa |= reset_devices;
/* Set TPA flags */
if (bp->disable_tpa) {
@@ -11942,7 +11977,7 @@ static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
{
int mc_count = netdev_mc_count(bp->dev);
struct bnx2x_mcast_list_elem *mc_mac =
- kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
+ kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC);
struct netdev_hw_addr *ha;
if (!mc_mac)
@@ -12064,11 +12099,8 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
return;
} else {
/* Schedule an SP task to handle rest of change */
- DP(NETIF_MSG_IFUP, "Scheduling an Rx mode change\n");
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_RX_MODE, &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_RX_MODE,
+ NETIF_MSG_IFUP);
}
}
@@ -12101,11 +12133,8 @@ void bnx2x_set_rx_mode_inner(struct bnx2x *bp)
/* configuring mcast to a vf involves sleeping (when we
* wait for the pf's response).
*/
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp,
+ BNX2X_SP_RTNL_VFPF_MCAST, 0);
}
}
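A small but worthwhile change above is the switch from kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC) to kcalloc(mc_count, sizeof(*mc_mac), GFP_ATOMIC): both return zeroed memory, but kcalloc also rejects a count * size product that would overflow. A hedged sketch of the preferred call shape, with the helper name invented for illustration:

	#include <linux/slab.h>

	/* Allocate a zeroed array of n elements; kcalloc guards against
	 * n * elem_size overflowing, which an open-coded multiply does not. */
	static void *example_alloc_array(size_t n, size_t elem_size, gfp_t gfp)
	{
		return kcalloc(n, elem_size, gfp);
	}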
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 0fb6ff2..31297266 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2277,11 +2277,11 @@ static int bnx2x_set_rx_mode_e2(struct bnx2x *bp,
data->header.rule_cnt, p->rx_accept_flags,
p->tx_accept_flags);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -2982,11 +2982,11 @@ static int bnx2x_mcast_setup_e2(struct bnx2x *bp,
raw->clear_pending(raw);
return 0;
} else {
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -3466,11 +3466,11 @@ static int bnx2x_mcast_setup_e1(struct bnx2x *bp,
raw->clear_pending(raw);
return 0;
} else {
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -4091,11 +4091,11 @@ static int bnx2x_setup_rss(struct bnx2x *bp,
data->capabilities |= ETH_RSS_UPDATE_RAMROD_DATA_UPDATE_RSS_KEY;
}
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
/* Send a ramrod */
@@ -4158,16 +4158,6 @@ void bnx2x_init_rss_config_obj(struct bnx2x *bp,
rss_obj->config_rss = bnx2x_setup_rss;
}
-int validate_vlan_mac(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac)
-{
- if (!vlan_mac->get_n_elements) {
- BNX2X_ERR("vlan mac object was not intialized\n");
- return -EINVAL;
- }
- return 0;
-}
-
/********************** Queue state object ***********************************/
/**
@@ -4587,13 +4577,12 @@ static inline int bnx2x_q_send_setup_e1x(struct bnx2x *bp,
/* Fill the ramrod data */
bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4615,13 +4604,12 @@ static inline int bnx2x_q_send_setup_e2(struct bnx2x *bp,
bnx2x_q_fill_setup_data_cmn(bp, params, rdata);
bnx2x_q_fill_setup_data_e2(bp, params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[BNX2X_PRIMARY_CID_INDEX],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4659,13 +4647,12 @@ static inline int bnx2x_q_send_setup_tx_only(struct bnx2x *bp,
o->cids[cid_index], rdata->general.client_id,
rdata->general.sp_client_id, rdata->general.cos);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, ramrod, o->cids[cid_index],
U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4760,13 +4747,12 @@ static inline int bnx2x_q_send_update(struct bnx2x *bp,
/* Fill the ramrod data */
bnx2x_q_fill_update_data(bp, o, update_params, rdata);
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
* and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
-
return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_UPDATE,
o->cids[cid_index], U64_HI(data_mapping),
U64_LO(data_mapping), ETH_CONNECTION_TYPE);
@@ -4813,11 +4799,62 @@ static inline int bnx2x_q_send_activate(struct bnx2x *bp,
return bnx2x_q_send_update(bp, params);
}
+static void bnx2x_q_fill_update_tpa_data(struct bnx2x *bp,
+ struct bnx2x_queue_sp_obj *obj,
+ struct bnx2x_queue_update_tpa_params *params,
+ struct tpa_update_ramrod_data *data)
+{
+ data->client_id = obj->cl_id;
+ data->complete_on_both_clients = params->complete_on_both_clients;
+ data->dont_verify_rings_pause_thr_flg =
+ params->dont_verify_thr;
+ data->max_agg_size = cpu_to_le16(params->max_agg_sz);
+ data->max_sges_for_packet = params->max_sges_pkt;
+ data->max_tpa_queues = params->max_tpa_queues;
+ data->sge_buff_size = cpu_to_le16(params->sge_buff_sz);
+ data->sge_page_base_hi = cpu_to_le32(U64_HI(params->sge_map));
+ data->sge_page_base_lo = cpu_to_le32(U64_LO(params->sge_map));
+ data->sge_pause_thr_high = cpu_to_le16(params->sge_pause_thr_high);
+ data->sge_pause_thr_low = cpu_to_le16(params->sge_pause_thr_low);
+ data->tpa_mode = params->tpa_mode;
+ data->update_ipv4 = params->update_ipv4;
+ data->update_ipv6 = params->update_ipv6;
+}
+
static inline int bnx2x_q_send_update_tpa(struct bnx2x *bp,
struct bnx2x_queue_state_params *params)
{
- /* TODO: Not implemented yet. */
- return -1;
+ struct bnx2x_queue_sp_obj *o = params->q_obj;
+ struct tpa_update_ramrod_data *rdata =
+ (struct tpa_update_ramrod_data *)o->rdata;
+ dma_addr_t data_mapping = o->rdata_mapping;
+ struct bnx2x_queue_update_tpa_params *update_tpa_params =
+ &params->params.update_tpa;
+ u16 type;
+
+ /* Clear the ramrod data */
+ memset(rdata, 0, sizeof(*rdata));
+
+ /* Fill the ramrod data */
+ bnx2x_q_fill_update_tpa_data(bp, o, update_tpa_params, rdata);
+
+	/* Embed the function id inside the type so that the sp post
+	 * function doesn't automatically add the PF func-id; this is
+	 * required for operations done by PFs on behalf of their VFs.
+	 */
+ type = ETH_CONNECTION_TYPE |
+ ((o->func_id) << SPE_HDR_FUNCTION_ID_SHIFT);
+
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
+ return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TPA_UPDATE,
+ o->cids[BNX2X_PRIMARY_CID_INDEX],
+ U64_HI(data_mapping),
+ U64_LO(data_mapping), type);
}
static inline int bnx2x_q_send_halt(struct bnx2x *bp,
@@ -5647,6 +5684,12 @@ static inline int bnx2x_func_send_switch_update(struct bnx2x *bp,
rdata->tx_switch_suspend = switch_update_params->suspend;
rdata->echo = SWITCH_UPDATE;
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_UPDATE, 0,
U64_HI(data_mapping),
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
@@ -5674,11 +5717,11 @@ static inline int bnx2x_func_send_afex_update(struct bnx2x *bp,
rdata->allowed_priorities = afex_update_params->allowed_priorities;
rdata->echo = AFEX_UPDATE;
- /* No need for an explicit memory barrier here as long we would
- * need to ensure the ordering of writing to the SPQ element
- * and updating of the SPQ producer which involves a memory
- * read and we will have to put a full memory barrier there
- * (inside bnx2x_sp_post()).
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
*/
DP(BNX2X_MSG_SP,
"afex: sending func_update vif_id 0x%x dvlan 0x%x prio 0x%x\n",
@@ -5763,6 +5806,12 @@ static inline int bnx2x_func_send_tx_start(struct bnx2x *bp,
rdata->traffic_type_to_priority_cos[i] =
tx_start_params->traffic_type_to_priority_cos[i];
+ /* No need for an explicit memory barrier here as long as we
+ * ensure the ordering of writing to the SPQ element
+ * and updating of the SPQ producer which involves a memory
+ * read. If the memory read is removed we will have to put a
+ * full memory barrier there (inside bnx2x_sp_post()).
+ */
return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC, 0,
U64_HI(data_mapping),
U64_LO(data_mapping), NONE_CONNECTION_TYPE);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 00d7f21..80f6c79 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -893,6 +893,24 @@ struct bnx2x_queue_update_params {
u8 cid_index;
};
+struct bnx2x_queue_update_tpa_params {
+ dma_addr_t sge_map;
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_pkt;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u8 _pad;
+
+ u16 sge_buff_sz;
+ u16 max_agg_sz;
+
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+};
+
struct rxq_pause_params {
u16 bd_th_lo;
u16 bd_th_hi;
@@ -987,6 +1005,7 @@ struct bnx2x_queue_state_params {
/* Params according to the current command */
union {
struct bnx2x_queue_update_params update;
+ struct bnx2x_queue_update_tpa_params update_tpa;
struct bnx2x_queue_setup_params setup;
struct bnx2x_queue_init_params init;
struct bnx2x_queue_setup_tx_only_params tx_only;
@@ -1403,6 +1422,4 @@ int bnx2x_config_rss(struct bnx2x *bp,
void bnx2x_get_rss_ind_table(struct bnx2x_rss_config_obj *rss_obj,
u8 *ind_table);
-int validate_vlan_mac(struct bnx2x *bp,
- struct bnx2x_vlan_mac_obj *vlan_mac);
#endif /* BNX2X_SP_VERBS */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index e42f48d..61e6f60 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -102,6 +102,21 @@ static void bnx2x_vf_igu_ack_sb(struct bnx2x *bp, struct bnx2x_virtf *vf,
mmiowb();
barrier();
}
+
+static bool bnx2x_validate_vf_sp_objs(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ bool print_err)
+{
+ if (!bnx2x_leading_vfq(vf, sp_initialized)) {
+ if (print_err)
+ BNX2X_ERR("Slowpath objects not yet initialized!\n");
+ else
+ DP(BNX2X_MSG_IOV, "Slowpath objects not yet initialized!\n");
+ return false;
+ }
+ return true;
+}
+
/* VFOP - VF slow-path operation support */
#define BNX2X_VFOP_FILTER_ADD_CNT_MAX 0x10000
@@ -176,6 +191,11 @@ enum bnx2x_vfop_rss_state {
BNX2X_VFOP_RSS_DONE
};
+enum bnx2x_vfop_tpa_state {
+ BNX2X_VFOP_TPA_CONFIG,
+ BNX2X_VFOP_TPA_DONE
+};
+
#define bnx2x_vfop_reset_wq(vf) atomic_set(&vf->op_in_progress, 0)
void bnx2x_vfop_qctor_dump_tx(struct bnx2x *bp, struct bnx2x_virtf *vf,
@@ -716,7 +736,6 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -736,9 +755,6 @@ static int bnx2x_vfop_mac_delall_cmd(struct bnx2x *bp,
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
- if (rc)
- return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
@@ -758,9 +774,12 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
struct bnx2x_vfop_filters *macs,
int qid, bool drv_only)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
+ struct bnx2x_vfop *vfop;
+
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+ vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
.multi_filter = macs,
@@ -782,9 +801,6 @@ int bnx2x_vfop_mac_list_cmd(struct bnx2x *bp,
bnx2x_vfop_mac_prep_ramrod(ramrod, &flags);
/* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj));
- if (rc)
- return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, mac_obj);
/* set extra args */
@@ -804,9 +820,12 @@ static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
struct bnx2x_vfop_cmd *cmd,
int qid, u16 vid, bool add)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
+ struct bnx2x_vfop *vfop;
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+
+ vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
.multi_filter = NULL, /* single command */
@@ -826,9 +845,6 @@ static int bnx2x_vfop_vlan_set_cmd(struct bnx2x *bp,
ramrod->user_req.u.vlan.vlan = vid;
/* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
- if (rc)
- return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -848,7 +864,6 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
int qid, bool drv_only)
{
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
@@ -868,9 +883,6 @@ static int bnx2x_vfop_vlan_delall_cmd(struct bnx2x *bp,
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
/* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
- if (rc)
- return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -890,9 +902,12 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
struct bnx2x_vfop_filters *vlans,
int qid, bool drv_only)
{
- struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
- int rc;
+ struct bnx2x_vfop *vfop;
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
+
+ vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_vfop_args_filters filters = {
.multi_filter = vlans,
@@ -911,9 +926,6 @@ int bnx2x_vfop_vlan_list_cmd(struct bnx2x *bp,
bnx2x_vfop_vlan_mac_prep_ramrod(ramrod, &flags);
/* set object */
- rc = validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj));
- if (rc)
- return rc;
ramrod->vlan_mac_obj = &bnx2x_vfq(vf, qid, vlan_obj);
/* set extra args */
@@ -971,11 +983,8 @@ op_err:
op_done:
case BNX2X_VFOP_QSETUP_DONE:
vf->cfg_flags |= VF_CFG_VLAN;
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_HYPERVISOR_VLAN,
+ BNX2X_MSG_IOV);
bnx2x_vfop_end(bp, vf, vfop);
return;
default:
@@ -1025,34 +1034,20 @@ static void bnx2x_vfop_qflr(struct bnx2x *bp, struct bnx2x_virtf *vf)
/* vlan-clear-all: driver-only, don't consume credit */
vfop->state = BNX2X_VFOP_QFLR_CLR_MAC;
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, vlan_obj))) {
- /* the vlan_mac vfop will re-schedule us */
- vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd,
- qid, true);
- if (vfop->rc)
- goto op_err;
- return;
-
- } else {
- /* need to reschedule ourselves */
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
- }
+ /* the vlan_mac vfop will re-schedule us */
+ vfop->rc = bnx2x_vfop_vlan_delall_cmd(bp, vf, &cmd, qid, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
case BNX2X_VFOP_QFLR_CLR_MAC:
/* mac-clear-all: driver only consume credit */
vfop->state = BNX2X_VFOP_QFLR_TERMINATE;
- if (!validate_vlan_mac(bp, &bnx2x_vfq(vf, qid, mac_obj))) {
- /* the vlan_mac vfop will re-schedule us */
- vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd,
- qid, true);
- if (vfop->rc)
- goto op_err;
- return;
-
- } else {
- /* need to reschedule ourselves */
- bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
- }
+ /* the vlan_mac vfop will re-schedule us */
+ vfop->rc = bnx2x_vfop_mac_delall_cmd(bp, vf, &cmd, qid, true);
+ if (vfop->rc)
+ goto op_err;
+ return;
case BNX2X_VFOP_QFLR_TERMINATE:
qstate = &vfop->op_p->qctor.qstate;
@@ -1095,8 +1090,13 @@ static int bnx2x_vfop_qflr_cmd(struct bnx2x *bp,
if (vfop) {
vfop->args.qx.qid = qid;
- bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
- bnx2x_vfop_qflr, cmd->done);
+ if ((qid == LEADING_IDX) &&
+ bnx2x_validate_vf_sp_objs(bp, vf, false))
+ bnx2x_vfop_opset(BNX2X_VFOP_QFLR_CLR_VLAN,
+ bnx2x_vfop_qflr, cmd->done);
+ else
+ bnx2x_vfop_opset(BNX2X_VFOP_QFLR_TERMINATE,
+ bnx2x_vfop_qflr, cmd->done);
return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_qflr,
cmd->block);
}
@@ -1310,7 +1310,10 @@ static void bnx2x_vfop_qdown(struct bnx2x *bp, struct bnx2x_virtf *vf)
switch (state) {
case BNX2X_VFOP_QTEARDOWN_RXMODE:
/* Drop all */
- vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
+ if (bnx2x_validate_vf_sp_objs(bp, vf, true))
+ vfop->state = BNX2X_VFOP_QTEARDOWN_CLR_VLAN;
+ else
+ vfop->state = BNX2X_VFOP_QTEARDOWN_QDTOR;
vfop->rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd, qid, 0);
if (vfop->rc)
goto op_err;
@@ -2117,7 +2120,9 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
cxt->size = min_t(size_t, tot_size, CDU_ILT_PAGE_SZ);
if (cxt->size) {
- BNX2X_PCI_ALLOC(cxt->addr, &cxt->mapping, cxt->size);
+ cxt->addr = BNX2X_PCI_ALLOC(&cxt->mapping, cxt->size);
+ if (!cxt->addr)
+ goto alloc_mem_err;
} else {
cxt->addr = NULL;
cxt->mapping = 0;
@@ -2127,20 +2132,28 @@ int bnx2x_iov_alloc_mem(struct bnx2x *bp)
/* allocate vfs ramrods dma memory - client_init and set_mac */
tot_size = BNX2X_NR_VIRTFN(bp) * sizeof(struct bnx2x_vf_sp);
- BNX2X_PCI_ALLOC(BP_VFDB(bp)->sp_dma.addr, &BP_VFDB(bp)->sp_dma.mapping,
- tot_size);
+ BP_VFDB(bp)->sp_dma.addr = BNX2X_PCI_ALLOC(&BP_VFDB(bp)->sp_dma.mapping,
+ tot_size);
+ if (!BP_VFDB(bp)->sp_dma.addr)
+ goto alloc_mem_err;
BP_VFDB(bp)->sp_dma.size = tot_size;
/* allocate mailboxes */
tot_size = BNX2X_NR_VIRTFN(bp) * MBX_MSG_ALIGNED_SIZE;
- BNX2X_PCI_ALLOC(BP_VF_MBX_DMA(bp)->addr, &BP_VF_MBX_DMA(bp)->mapping,
- tot_size);
+ BP_VF_MBX_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_MBX_DMA(bp)->mapping,
+ tot_size);
+ if (!BP_VF_MBX_DMA(bp)->addr)
+ goto alloc_mem_err;
+
BP_VF_MBX_DMA(bp)->size = tot_size;
/* allocate local bulletin boards */
tot_size = BNX2X_NR_VIRTFN(bp) * BULLETIN_CONTENT_SIZE;
- BNX2X_PCI_ALLOC(BP_VF_BULLETIN_DMA(bp)->addr,
- &BP_VF_BULLETIN_DMA(bp)->mapping, tot_size);
+ BP_VF_BULLETIN_DMA(bp)->addr = BNX2X_PCI_ALLOC(&BP_VF_BULLETIN_DMA(bp)->mapping,
+ tot_size);
+ if (!BP_VF_BULLETIN_DMA(bp)->addr)
+ goto alloc_mem_err;
+
BP_VF_BULLETIN_DMA(bp)->size = tot_size;
return 0;
@@ -2166,6 +2179,9 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
bnx2x_vf_sp_map(bp, vf, q_data),
q_type);
+ /* sp indication is set only when vlan/mac/etc. are initialized */
+ q->sp_initialized = false;
+
DP(BNX2X_MSG_IOV,
"initialized vf %d's queue object. func id set to %d. cid set to 0x%x\n",
vf->abs_vfid, q->sp_obj.func_id, q->cid);
@@ -2527,10 +2543,10 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX -
(is_fcoe ? 0 : 1);
- DP(BNX2X_MSG_IOV,
- "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
- BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
- first_queue_query_index + num_queues_req);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "BNX2X_NUM_ETH_QUEUES %d, is_fcoe %d, first_queue_query_index %d => determined the last non virtual statistics query index is %d. Will add queries on top of that\n",
+ BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index,
+ first_queue_query_index + num_queues_req);
cur_data_offset = bp->fw_stats_data_mapping +
offsetof(struct bnx2x_fw_stats_data, queue_stats) +
@@ -2544,9 +2560,9 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
struct bnx2x_virtf *vf = BP_VF(bp, i);
if (vf->state != VF_ENABLED) {
- DP(BNX2X_MSG_IOV,
- "vf %d not enabled so no stats for it\n",
- vf->abs_vfid);
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
+ "vf %d not enabled so no stats for it\n",
+ vf->abs_vfid);
continue;
}
@@ -2597,7 +2613,8 @@ void bnx2x_iov_sp_task(struct bnx2x *bp)
/* Iterate over all VFs and invoke state transition for VFs with
* 'in-progress' slow-path operations
*/
- DP(BNX2X_MSG_IOV, "searching for pending vf operations\n");
+ DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_SP),
+ "searching for pending vf operations\n");
for_each_vf(bp, i) {
struct bnx2x_virtf *vf = BP_VF(bp, i);
@@ -3046,6 +3063,83 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
return -ENOMEM;
}
+/* VFOP tpa update, send update on all queues */
+static void bnx2x_vfop_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_cur(bp, vf);
+ struct bnx2x_vfop_args_tpa *tpa_args = &vfop->args.tpa;
+ enum bnx2x_vfop_tpa_state state = vfop->state;
+
+ bnx2x_vfop_reset_wq(vf);
+
+ if (vfop->rc < 0)
+ goto op_err;
+
+ DP(BNX2X_MSG_IOV, "vf[%d:%d] STATE: %d\n",
+ vf->abs_vfid, tpa_args->qid,
+ state);
+
+ switch (state) {
+ case BNX2X_VFOP_TPA_CONFIG:
+
+ if (tpa_args->qid < vf_rxq_count(vf)) {
+ struct bnx2x_queue_state_params *qstate =
+ &vf->op_params.qstate;
+
+ qstate->q_obj = &bnx2x_vfq(vf, tpa_args->qid, sp_obj);
+
+ /* The only thing that changes for the ramrod params
+ * between calls is the sge_map
+ */
+ qstate->params.update_tpa.sge_map =
+ tpa_args->sge_map[tpa_args->qid];
+
+ DP(BNX2X_MSG_IOV, "sge_addr[%d] %08x:%08x\n",
+ tpa_args->qid,
+ U64_HI(qstate->params.update_tpa.sge_map),
+ U64_LO(qstate->params.update_tpa.sge_map));
+ qstate->cmd = BNX2X_Q_CMD_UPDATE_TPA;
+ vfop->rc = bnx2x_queue_state_change(bp, qstate);
+
+ tpa_args->qid++;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_CONT);
+ }
+ vfop->state = BNX2X_VFOP_TPA_DONE;
+ vfop->rc = 0;
+ bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
+op_err:
+ BNX2X_ERR("TPA update error: rc %d\n", vfop->rc);
+op_done:
+ case BNX2X_VFOP_TPA_DONE:
+ bnx2x_vfop_end(bp, vf, vfop);
+ return;
+ default:
+ bnx2x_vfop_default(state);
+ }
+op_pending:
+ return;
+}
+
+int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct vfpf_tpa_tlv *tpa_tlv)
+{
+ struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
+
+ if (vfop) {
+ vfop->args.qx.qid = 0; /* loop */
+ memcpy(&vfop->args.tpa.sge_map,
+ tpa_tlv->tpa_client_info.sge_addr,
+ sizeof(vfop->args.tpa.sge_map));
+ bnx2x_vfop_opset(BNX2X_VFOP_TPA_CONFIG,
+ bnx2x_vfop_tpa, cmd->done);
+ return bnx2x_vfop_transition(bp, vf, bnx2x_vfop_tpa,
+ cmd->block);
+ }
+ return -ENOMEM;
+}
+
/* VF release ~ VF close + VF release-resources
* Release is the ultimate SW shutdown and is called whenever an
* irrecoverable error is encountered.
@@ -3074,16 +3168,6 @@ static inline void bnx2x_vf_get_sbdf(struct bnx2x *bp,
*sbdf = vf->devfn | (vf->bus << 8);
}
-static inline void bnx2x_vf_get_bars(struct bnx2x *bp, struct bnx2x_virtf *vf,
- struct bnx2x_vf_bar_info *bar_info)
-{
- int n;
-
- bar_info->nr_bars = bp->vfdb->sriov.nres;
- for (n = 0; n < bar_info->nr_bars; n++)
- bar_info->bars[n] = vf->bars[n];
-}
-
void bnx2x_lock_vf_pf_channel(struct bnx2x *bp, struct bnx2x_virtf *vf,
enum channel_tlvs tlv)
{
@@ -3405,13 +3489,13 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
ivi->spoofchk = 1; /*always enabled */
if (vf->state == VF_ENABLED) {
/* mac and vlan are in vlan_mac objects */
- if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj)))
+ if (bnx2x_validate_vf_sp_objs(bp, vf, false)) {
mac_obj->get_n_elements(bp, mac_obj, 1, (u8 *)&ivi->mac,
0, ETH_ALEN);
- if (validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, vlan_obj)))
vlan_obj->get_n_elements(bp, vlan_obj, 1,
(u8 *)&ivi->vlan, 0,
VLAN_HLEN);
+ }
} else {
/* mac */
if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
@@ -3485,17 +3569,17 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
/* configure the mac in device on this vf's queue */
unsigned long ramrod_flags = 0;
- struct bnx2x_vlan_mac_obj *mac_obj =
- &bnx2x_leading_vfq(vf, mac_obj);
+ struct bnx2x_vlan_mac_obj *mac_obj;
- rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
- if (rc)
- return rc;
+ /* User should be able to see failure reason in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_MAC);
/* remove existing eth macs */
+ mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_ETH_MAC, true);
if (rc) {
BNX2X_ERR("failed to delete eth macs\n");
@@ -3569,17 +3653,16 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
BNX2X_Q_LOGICAL_STATE_ACTIVE)
return rc;
- /* configure the vlan in device on this vf's queue */
- vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
- rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
- if (rc)
- return rc;
+ /* User should be able to see error in system logs */
+ if (!bnx2x_validate_vf_sp_objs(bp, vf, true))
+ return -EINVAL;
/* must lock vfpf channel to protect against vf flows */
bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
/* remove existing vlans */
__set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
&ramrod_flags);
if (rc) {
@@ -3736,13 +3819,9 @@ void bnx2x_timer_sriov(struct bnx2x *bp)
bnx2x_sample_bulletin(bp);
/* if channel is down we need to self destruct */
- if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN) {
- smp_mb__before_clear_bit();
- set_bit(BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
- &bp->sp_rtnl_state);
- smp_mb__after_clear_bit();
- schedule_delayed_work(&bp->sp_rtnl_task, 0);
- }
+ if (bp->old_bulletin.valid_bitmap & 1 << CHANNEL_DOWN)
+ bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_VFPF_CHANNEL_DOWN,
+ BNX2X_MSG_IOV);
}
void __iomem *bnx2x_vf_doorbells(struct bnx2x *bp)
@@ -3756,12 +3835,16 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
mutex_init(&bp->vf2pf_mutex);
/* allocate vf2pf mailbox for vf to pf channel */
- BNX2X_PCI_ALLOC(bp->vf2pf_mbox, &bp->vf2pf_mbox_mapping,
- sizeof(struct bnx2x_vf_mbx_msg));
+ bp->vf2pf_mbox = BNX2X_PCI_ALLOC(&bp->vf2pf_mbox_mapping,
+ sizeof(struct bnx2x_vf_mbx_msg));
+ if (!bp->vf2pf_mbox)
+ goto alloc_mem_err;
/* allocate pf 2 vf bulletin board */
- BNX2X_PCI_ALLOC(bp->pf2vf_bulletin, &bp->pf2vf_bulletin_mapping,
- sizeof(union pf_vf_bulletin));
+ bp->pf2vf_bulletin = BNX2X_PCI_ALLOC(&bp->pf2vf_bulletin_mapping,
+ sizeof(union pf_vf_bulletin));
+ if (!bp->pf2vf_bulletin)
+ goto alloc_mem_err;
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index d9fcca1..b1dc751 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -83,6 +83,7 @@ struct bnx2x_vf_queue {
u16 index;
u16 sb_idx;
bool is_leading;
+ bool sp_initialized;
};
/* struct bnx2x_vfop_qctor_params - prepare queue construction parameters:
@@ -100,6 +101,7 @@ union bnx2x_vfop_params {
struct bnx2x_mcast_ramrod_params mcast;
struct bnx2x_config_rss_params rss;
struct bnx2x_vfop_qctor_params qctor;
+ struct bnx2x_queue_state_params qstate;
};
/* forward */
@@ -166,6 +168,11 @@ struct bnx2x_vfop_args_filters {
atomic_t *credit; /* non NULL means 'don't consume credit' */
};
+struct bnx2x_vfop_args_tpa {
+ int qid;
+ dma_addr_t sge_map[PFVF_MAX_QUEUES_PER_VF];
+};
+
union bnx2x_vfop_args {
struct bnx2x_vfop_args_mcast mc_list;
struct bnx2x_vfop_args_qctor qctor;
@@ -173,6 +180,7 @@ union bnx2x_vfop_args {
struct bnx2x_vfop_args_defvlan defvlan;
struct bnx2x_vfop_args_qx qx;
struct bnx2x_vfop_args_filters filters;
+ struct bnx2x_vfop_args_tpa tpa;
};
struct bnx2x_vfop {
@@ -704,6 +712,11 @@ int bnx2x_vfop_rss_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd);
+int bnx2x_vfop_tpa_cmd(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vfop_cmd *cmd,
+ struct vfpf_tpa_tlv *tpa_tlv);
+
/* VF release ~ VF close + VF release-resources
*
* Release is the ultimate SW shutdown and is called whenever an
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 3fa6c2a..1117ed7 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -548,6 +548,7 @@ static void bnx2x_leading_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf->leading_rss = cl_id;
q->is_leading = true;
+ q->sp_initialized = true;
}
/* ask the pf to open a queue for the vf */
@@ -1159,7 +1160,8 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
resp->pfdev_info.db_size = bp->db_size;
resp->pfdev_info.indices_per_sb = HC_SB_MAX_INDICES_E2;
resp->pfdev_info.pf_cap = (PFVF_CAP_RSS |
- /* PFVF_CAP_DHC |*/ PFVF_CAP_TPA);
+ PFVF_CAP_TPA |
+ PFVF_CAP_TPA_UPDATE);
bnx2x_fill_fw_str(bp, resp->pfdev_info.fw_ver,
sizeof(resp->pfdev_info.fw_ver));
@@ -1694,16 +1696,12 @@ static int bnx2x_vfop_mbx_qfilters_cmd(struct bnx2x *bp,
return -ENOMEM;
}
-static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
- struct bnx2x_virtf *vf,
- struct bnx2x_vf_mbx *mbx)
+static int bnx2x_filters_validate_mac(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
{
- struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
- struct bnx2x_vfop_cmd cmd = {
- .done = bnx2x_vf_mbx_resp,
- .block = false,
- };
+ int rc = 0;
/* if a mac was already set for this VF via the set vf mac ndo, we only
* accept mac configurations of that mac. Why accept them at all?
@@ -1716,6 +1714,7 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
BNX2X_ERR("VF[%d] requested the addition of multiple macs after set_vf_mac ndo was called\n",
vf->abs_vfid);
vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
@@ -1726,9 +1725,22 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
vf->abs_vfid);
vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
}
+
+response:
+ return rc;
+}
+
+static int bnx2x_filters_validate_vlan(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct vfpf_set_q_filters_tlv *filters)
+{
+ struct pf_vf_bulletin_content *bulletin = BP_VF_BULLETIN(bp, vf->index);
+ int rc = 0;
+
/* if vlan was set by hypervisor we don't allow guest to config vlan */
if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
int i;
@@ -1740,13 +1752,36 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
vf->abs_vfid);
vf->op_rc = -EPERM;
+ rc = -EPERM;
goto response;
}
}
}
/* verify vf_qid */
- if (filters->vf_qid > vf_rxq_count(vf))
+ if (filters->vf_qid > vf_rxq_count(vf)) {
+ rc = -EPERM;
+ goto response;
+ }
+
+response:
+ return rc;
+}
+
+static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
+ struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters;
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+
+ if (bnx2x_filters_validate_mac(bp, vf, filters))
+ goto response;
+
+ if (bnx2x_filters_validate_vlan(bp, vf, filters))
goto response;
DP(BNX2X_MSG_IOV, "VF[%d] Q_FILTERS: queue[%d]\n",
@@ -1877,6 +1912,75 @@ mbx_resp:
bnx2x_vf_mbx_resp(bp, vf);
}
+static int bnx2x_validate_tpa_params(struct bnx2x *bp,
+ struct vfpf_tpa_tlv *tpa_tlv)
+{
+ int rc = 0;
+
+ if (tpa_tlv->tpa_client_info.max_sges_for_packet >
+ U_ETH_MAX_SGES_FOR_PACKET) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_sges received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_sges_for_packet,
+ U_ETH_MAX_SGES_FOR_PACKET);
+ }
+
+ if (tpa_tlv->tpa_client_info.max_tpa_queues > MAX_AGG_QS(bp)) {
+ rc = -EINVAL;
+ BNX2X_ERR("TPA update: max_tpa_queues received %d, max is %d\n",
+ tpa_tlv->tpa_client_info.max_tpa_queues,
+ MAX_AGG_QS(bp));
+ }
+
+ return rc;
+}
+
+static void bnx2x_vf_mbx_update_tpa(struct bnx2x *bp, struct bnx2x_virtf *vf,
+ struct bnx2x_vf_mbx *mbx)
+{
+ struct bnx2x_vfop_cmd cmd = {
+ .done = bnx2x_vf_mbx_resp,
+ .block = false,
+ };
+ struct bnx2x_queue_update_tpa_params *vf_op_params =
+ &vf->op_params.qstate.params.update_tpa;
+ struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa;
+
+ memset(vf_op_params, 0, sizeof(*vf_op_params));
+
+ if (bnx2x_validate_tpa_params(bp, tpa_tlv))
+ goto mbx_resp;
+
+ vf_op_params->complete_on_both_clients =
+ tpa_tlv->tpa_client_info.complete_on_both_clients;
+ vf_op_params->dont_verify_thr =
+ tpa_tlv->tpa_client_info.dont_verify_thr;
+ vf_op_params->max_agg_sz =
+ tpa_tlv->tpa_client_info.max_agg_size;
+ vf_op_params->max_sges_pkt =
+ tpa_tlv->tpa_client_info.max_sges_for_packet;
+ vf_op_params->max_tpa_queues =
+ tpa_tlv->tpa_client_info.max_tpa_queues;
+ vf_op_params->sge_buff_sz =
+ tpa_tlv->tpa_client_info.sge_buff_size;
+ vf_op_params->sge_pause_thr_high =
+ tpa_tlv->tpa_client_info.sge_pause_thr_high;
+ vf_op_params->sge_pause_thr_low =
+ tpa_tlv->tpa_client_info.sge_pause_thr_low;
+ vf_op_params->tpa_mode =
+ tpa_tlv->tpa_client_info.tpa_mode;
+ vf_op_params->update_ipv4 =
+ tpa_tlv->tpa_client_info.update_ipv4;
+ vf_op_params->update_ipv6 =
+ tpa_tlv->tpa_client_info.update_ipv6;
+
+ vf->op_rc = bnx2x_vfop_tpa_cmd(bp, vf, &cmd, tpa_tlv);
+
+mbx_resp:
+ if (vf->op_rc)
+ bnx2x_vf_mbx_resp(bp, vf);
+}
+
/* dispatch request */
static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
struct bnx2x_vf_mbx *mbx)
@@ -1916,6 +2020,9 @@ static void bnx2x_vf_mbx_request(struct bnx2x *bp, struct bnx2x_virtf *vf,
case CHANNEL_TLV_UPDATE_RSS:
bnx2x_vf_mbx_update_rss(bp, vf, mbx);
return;
+ case CHANNEL_TLV_UPDATE_TPA:
+ bnx2x_vf_mbx_update_tpa(bp, vf, mbx);
+ return;
}
} else {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
index 208568b..c922b81 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
@@ -162,6 +162,7 @@ struct pfvf_acquire_resp_tlv {
#define PFVF_CAP_RSS 0x00000001
#define PFVF_CAP_DHC 0x00000002
#define PFVF_CAP_TPA 0x00000004
+#define PFVF_CAP_TPA_UPDATE 0x00000008
char fw_ver[32];
u16 db_size;
u8 indices_per_sb;
@@ -303,6 +304,25 @@ struct vfpf_set_q_filters_tlv {
u32 rx_mask; /* see mask constants at the top of the file */
};
+struct vfpf_tpa_tlv {
+ struct vfpf_first_tlv first_tlv;
+
+ struct vf_pf_tpa_client_info {
+ aligned_u64 sge_addr[PFVF_MAX_QUEUES_PER_VF];
+ u8 update_ipv4;
+ u8 update_ipv6;
+ u8 max_tpa_queues;
+ u8 max_sges_for_packet;
+ u8 complete_on_both_clients;
+ u8 dont_verify_thr;
+ u8 tpa_mode;
+ u16 sge_buff_size;
+ u16 max_agg_size;
+ u16 sge_pause_thr_low;
+ u16 sge_pause_thr_high;
+ } tpa_client_info;
+};
+
/* close VF (disable VF) */
struct vfpf_close_tlv {
struct vfpf_first_tlv first_tlv;
@@ -331,6 +351,7 @@ union vfpf_tlvs {
struct vfpf_set_q_filters_tlv set_q_filters;
struct vfpf_release_tlv release;
struct vfpf_rss_tlv update_rss;
+ struct vfpf_tpa_tlv update_tpa;
struct channel_list_end_tlv list_end;
struct tlv_buffer_size tlv_buf_size;
};
@@ -405,6 +426,7 @@ enum channel_tlvs {
CHANNEL_TLV_PF_SET_VLAN,
CHANNEL_TLV_UPDATE_RSS,
CHANNEL_TLV_PHYS_PORT_ID,
+ CHANNEL_TLV_UPDATE_TPA,
CHANNEL_TLV_MAX
};
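
The new PFVF_CAP_TPA_UPDATE bit and the vfpf_tpa_tlv layout let a VF ask the PF to reprogram TPA aggregation parameters over the VF-PF channel. A rough, hedged sketch of the VF side in plain user-space C (MAX_QUEUES, the trimmed struct and prepare_tpa_update() are illustrative stand-ins, not driver code), showing the capability check before such a request would be built:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PFVF_CAP_TPA		0x00000004
#define PFVF_CAP_TPA_UPDATE	0x00000008
#define MAX_QUEUES		4	/* stands in for PFVF_MAX_QUEUES_PER_VF */

/* Trimmed-down copy of the vf_pf_tpa_client_info payload above. */
struct tpa_client_info {
	uint64_t sge_addr[MAX_QUEUES];
	uint8_t  update_ipv4, update_ipv6;
	uint8_t  max_tpa_queues, max_sges_for_packet;
	uint16_t sge_buff_size, max_agg_size;
};

/* Build a TPA update only if the PF advertised PFVF_CAP_TPA_UPDATE. */
static int prepare_tpa_update(uint32_t pf_cap, struct tpa_client_info *info,
			      const uint64_t *sge_maps, int nr_queues)
{
	if (!(pf_cap & PFVF_CAP_TPA_UPDATE))
		return -1;	/* old PF: no TPA reconfiguration possible */

	memset(info, 0, sizeof(*info));
	info->update_ipv4 = 1;
	info->max_tpa_queues = (uint8_t)nr_queues;
	for (int i = 0; i < nr_queues && i < MAX_QUEUES; i++)
		info->sge_addr[i] = sge_maps[i];
	return 0;
}

int main(void)
{
	struct tpa_client_info info;
	uint64_t maps[2] = { 0x1000, 0x2000 };

	printf("%d\n", prepare_tpa_update(PFVF_CAP_TPA | PFVF_CAP_TPA_UPDATE,
					  &info, maps, 2));
	return 0;
}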
diff --git a/drivers/net/ethernet/broadcom/genet/Makefile b/drivers/net/ethernet/broadcom/genet/Makefile
new file mode 100644
index 0000000..31f55a9
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/Makefile
@@ -0,0 +1,2 @@
+obj-$(CONFIG_BCMGENET) += genet.o
+genet-objs := bcmgenet.o bcmmii.o
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
new file mode 100644
index 0000000..72ce6e8
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -0,0 +1,2578 @@
+/*
+ * Broadcom GENET (Gigabit Ethernet) controller driver
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#define pr_fmt(fmt) "bcmgenet: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/if_ether.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pm.h>
+#include <linux/clk.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/of_platform.h>
+#include <net/arp.h>
+
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/phy.h>
+
+#include <asm/unaligned.h>
+
+#include "bcmgenet.h"
+
+/* Maximum number of hardware queues, downsized if needed */
+#define GENET_MAX_MQ_CNT 4
+
+/* Default highest priority queue for multi queue support */
+#define GENET_Q0_PRIORITY 0
+
+#define GENET_DEFAULT_BD_CNT \
+ (TOTAL_DESC - priv->hw_params->tx_queues * priv->hw_params->bds_cnt)
+
+#define RX_BUF_LENGTH 2048
+#define SKB_ALIGNMENT 32
+
+/* Tx/Rx DMA register offset, skip 256 descriptors */
+#define WORDS_PER_BD(p) (p->hw_params->words_per_bd)
+#define DMA_DESC_SIZE (WORDS_PER_BD(priv) * sizeof(u32))
+
+#define GENET_TDMA_REG_OFF (priv->hw_params->tdma_offset + \
+ TOTAL_DESC * DMA_DESC_SIZE)
+
+#define GENET_RDMA_REG_OFF (priv->hw_params->rdma_offset + \
+ TOTAL_DESC * DMA_DESC_SIZE)
+
+static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
+ void __iomem *d, u32 value)
+{
+ __raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
+}
+
+static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
+ void __iomem *d)
+{
+ return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
+}
+
+static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
+ void __iomem *d,
+ dma_addr_t addr)
+{
+ __raw_writel(lower_32_bits(addr), d + DMA_DESC_ADDRESS_LO);
+
+ /* Register writes to GISB bus can take a couple hundred nanoseconds
+ * and are done for each packet, save these expensive writes unless
+ * the platform is explicitly configured for 64-bits/LPAE.
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (priv->hw_params->flags & GENET_HAS_40BITS)
+ __raw_writel(upper_32_bits(addr), d + DMA_DESC_ADDRESS_HI);
+#endif
+}
+
+/* Combined address + length/status setter */
+static inline void dmadesc_set(struct bcmgenet_priv *priv,
+ void __iomem *d, dma_addr_t addr, u32 val)
+{
+ dmadesc_set_length_status(priv, d, val);
+ dmadesc_set_addr(priv, d, addr);
+}
+
+static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
+ void __iomem *d)
+{
+ dma_addr_t addr;
+
+ addr = __raw_readl(d + DMA_DESC_ADDRESS_LO);
+
+ /* Register writes to GISB bus can take a couple hundred nanoseconds
+ * and are done for each packet, save these expensive writes unless
+ * the platform is explicitly configured for 64-bits/LPAE.
+ */
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (priv->hw_params->flags & GENET_HAS_40BITS)
+ addr |= (u64)__raw_readl(d + DMA_DESC_ADDRESS_HI) << 32;
+#endif
+ return addr;
+}
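
dmadesc_set_addr()/dmadesc_get_addr() split a DMA address across the LO and HI descriptor words and only touch the HI word when the platform can actually produce addresses wider than 32 bits. The arithmetic behind lower_32_bits()/upper_32_bits() and the recombination, as a standalone illustration rather than driver code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t addr = 0x12345678ULL | (0x3ULL << 32);	/* 40-bit example */
	uint32_t lo = (uint32_t)addr;			/* DMA_DESC_ADDRESS_LO */
	uint32_t hi = (uint32_t)(addr >> 32);		/* DMA_DESC_ADDRESS_HI */
	uint64_t back = ((uint64_t)hi << 32) | lo;

	printf("lo=0x%08x hi=0x%08x back=0x%010llx\n",
	       lo, hi, (unsigned long long)back);
	return 0;
}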
+
+#define GENET_VER_FMT "%1d.%1d EPHY: 0x%04x"
+
+#define GENET_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
+ NETIF_MSG_LINK)
+
+static inline u32 bcmgenet_rbuf_ctrl_get(struct bcmgenet_priv *priv)
+{
+ if (GENET_IS_V1(priv))
+ return bcmgenet_rbuf_readl(priv, RBUF_FLUSH_CTRL_V1);
+ else
+ return bcmgenet_sys_readl(priv, SYS_RBUF_FLUSH_CTRL);
+}
+
+static inline void bcmgenet_rbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
+{
+ if (GENET_IS_V1(priv))
+ bcmgenet_rbuf_writel(priv, val, RBUF_FLUSH_CTRL_V1);
+ else
+ bcmgenet_sys_writel(priv, val, SYS_RBUF_FLUSH_CTRL);
+}
+
+/* These macros are defined to deal with the register map change
+ * between GENET1.1 and GENET2. Only those currently being used
+ * by the driver are defined.
+ */
+static inline u32 bcmgenet_tbuf_ctrl_get(struct bcmgenet_priv *priv)
+{
+ if (GENET_IS_V1(priv))
+ return bcmgenet_rbuf_readl(priv, TBUF_CTRL_V1);
+ else
+ return __raw_readl(priv->base +
+ priv->hw_params->tbuf_offset + TBUF_CTRL);
+}
+
+static inline void bcmgenet_tbuf_ctrl_set(struct bcmgenet_priv *priv, u32 val)
+{
+ if (GENET_IS_V1(priv))
+ bcmgenet_rbuf_writel(priv, val, TBUF_CTRL_V1);
+ else
+ __raw_writel(val, priv->base +
+ priv->hw_params->tbuf_offset + TBUF_CTRL);
+}
+
+static inline u32 bcmgenet_bp_mc_get(struct bcmgenet_priv *priv)
+{
+ if (GENET_IS_V1(priv))
+ return bcmgenet_rbuf_readl(priv, TBUF_BP_MC_V1);
+ else
+ return __raw_readl(priv->base +
+ priv->hw_params->tbuf_offset + TBUF_BP_MC);
+}
+
+static inline void bcmgenet_bp_mc_set(struct bcmgenet_priv *priv, u32 val)
+{
+ if (GENET_IS_V1(priv))
+ bcmgenet_rbuf_writel(priv, val, TBUF_BP_MC_V1);
+ else
+ __raw_writel(val, priv->base +
+ priv->hw_params->tbuf_offset + TBUF_BP_MC);
+}
+
+/* RX/TX DMA register accessors */
+enum dma_reg {
+ DMA_RING_CFG = 0,
+ DMA_CTRL,
+ DMA_STATUS,
+ DMA_SCB_BURST_SIZE,
+ DMA_ARB_CTRL,
+ DMA_PRIORITY,
+ DMA_RING_PRIORITY,
+};
+
+static const u8 bcmgenet_dma_regs_v3plus[] = {
+ [DMA_RING_CFG] = 0x00,
+ [DMA_CTRL] = 0x04,
+ [DMA_STATUS] = 0x08,
+ [DMA_SCB_BURST_SIZE] = 0x0C,
+ [DMA_ARB_CTRL] = 0x2C,
+ [DMA_PRIORITY] = 0x30,
+ [DMA_RING_PRIORITY] = 0x38,
+};
+
+static const u8 bcmgenet_dma_regs_v2[] = {
+ [DMA_RING_CFG] = 0x00,
+ [DMA_CTRL] = 0x04,
+ [DMA_STATUS] = 0x08,
+ [DMA_SCB_BURST_SIZE] = 0x0C,
+ [DMA_ARB_CTRL] = 0x30,
+ [DMA_PRIORITY] = 0x34,
+ [DMA_RING_PRIORITY] = 0x3C,
+};
+
+static const u8 bcmgenet_dma_regs_v1[] = {
+ [DMA_CTRL] = 0x00,
+ [DMA_STATUS] = 0x04,
+ [DMA_SCB_BURST_SIZE] = 0x0C,
+ [DMA_ARB_CTRL] = 0x30,
+ [DMA_PRIORITY] = 0x34,
+ [DMA_RING_PRIORITY] = 0x3C,
+};
+
+/* Set at runtime once bcmgenet version is known */
+static const u8 *bcmgenet_dma_regs;
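
The per-version offset arrays above turn a logical register name (the dma_reg enum) into an MMIO offset, and bcmgenet_dma_regs is pointed at the right table once the hardware revision is known. A compact standalone sketch of the same lookup scheme, with the register names and offsets trimmed to a few entries:

#include <stdint.h>
#include <stdio.h>

enum dma_reg_sketch { DMA_RING_CFG, DMA_CTRL, DMA_STATUS, DMA_ARB_CTRL };

static const uint8_t regs_v3plus[] = {
	[DMA_RING_CFG] = 0x00, [DMA_CTRL] = 0x04,
	[DMA_STATUS] = 0x08, [DMA_ARB_CTRL] = 0x2C,
};
static const uint8_t regs_v2[] = {
	[DMA_RING_CFG] = 0x00, [DMA_CTRL] = 0x04,
	[DMA_STATUS] = 0x08, [DMA_ARB_CTRL] = 0x30,
};

/* Chosen once the hardware revision is known, as the driver does above. */
static const uint8_t *dma_regs;

int main(void)
{
	int version = 2;	/* would come from probing the hardware */

	dma_regs = (version >= 3) ? regs_v3plus : regs_v2;
	printf("DMA_ARB_CTRL offset: 0x%02x\n", dma_regs[DMA_ARB_CTRL]);
	return 0;
}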
+
+static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
+{
+ return netdev_priv(dev_get_drvdata(dev));
+}
+
+static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
+ enum dma_reg r)
+{
+ return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
+ u32 val, enum dma_reg r)
+{
+ __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
+ enum dma_reg r)
+{
+ return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+static inline void bcmgenet_rdma_writel(struct bcmgenet_priv *priv,
+ u32 val, enum dma_reg r)
+{
+ __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+ DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
+}
+
+/* RDMA/TDMA ring registers and accessors
+ * we merge the common fields and just prefix with T/D the registers
+ * that have a different meaning depending on the direction
+ */
+enum dma_ring_reg {
+ TDMA_READ_PTR = 0,
+ RDMA_WRITE_PTR = TDMA_READ_PTR,
+ TDMA_READ_PTR_HI,
+ RDMA_WRITE_PTR_HI = TDMA_READ_PTR_HI,
+ TDMA_CONS_INDEX,
+ RDMA_PROD_INDEX = TDMA_CONS_INDEX,
+ TDMA_PROD_INDEX,
+ RDMA_CONS_INDEX = TDMA_PROD_INDEX,
+ DMA_RING_BUF_SIZE,
+ DMA_START_ADDR,
+ DMA_START_ADDR_HI,
+ DMA_END_ADDR,
+ DMA_END_ADDR_HI,
+ DMA_MBUF_DONE_THRESH,
+ TDMA_FLOW_PERIOD,
+ RDMA_XON_XOFF_THRESH = TDMA_FLOW_PERIOD,
+ TDMA_WRITE_PTR,
+ RDMA_READ_PTR = TDMA_WRITE_PTR,
+ TDMA_WRITE_PTR_HI,
+ RDMA_READ_PTR_HI = TDMA_WRITE_PTR_HI
+};
+
+/* GENET v4 supports 40-bit pointer addressing;
+ * for obvious reasons the LO and HI word parts
+ * are contiguous, but this offsets the other
+ * registers.
+ */
+static const u8 genet_dma_ring_regs_v4[] = {
+ [TDMA_READ_PTR] = 0x00,
+ [TDMA_READ_PTR_HI] = 0x04,
+ [TDMA_CONS_INDEX] = 0x08,
+ [TDMA_PROD_INDEX] = 0x0C,
+ [DMA_RING_BUF_SIZE] = 0x10,
+ [DMA_START_ADDR] = 0x14,
+ [DMA_START_ADDR_HI] = 0x18,
+ [DMA_END_ADDR] = 0x1C,
+ [DMA_END_ADDR_HI] = 0x20,
+ [DMA_MBUF_DONE_THRESH] = 0x24,
+ [TDMA_FLOW_PERIOD] = 0x28,
+ [TDMA_WRITE_PTR] = 0x2C,
+ [TDMA_WRITE_PTR_HI] = 0x30,
+};
+
+static const u8 genet_dma_ring_regs_v123[] = {
+ [TDMA_READ_PTR] = 0x00,
+ [TDMA_CONS_INDEX] = 0x04,
+ [TDMA_PROD_INDEX] = 0x08,
+ [DMA_RING_BUF_SIZE] = 0x0C,
+ [DMA_START_ADDR] = 0x10,
+ [DMA_END_ADDR] = 0x14,
+ [DMA_MBUF_DONE_THRESH] = 0x18,
+ [TDMA_FLOW_PERIOD] = 0x1C,
+ [TDMA_WRITE_PTR] = 0x20,
+};
+
+/* Set at runtime once GENET version is known */
+static const u8 *genet_dma_ring_regs;
+
+static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ enum dma_ring_reg r)
+{
+ return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ u32 val,
+ enum dma_ring_reg r)
+{
+ __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ enum dma_ring_reg r)
+{
+ return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
+ unsigned int ring,
+ u32 val,
+ enum dma_ring_reg r)
+{
+ __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
+ (DMA_RING_SIZE * ring) +
+ genet_dma_ring_regs[r]);
+}
+
+static int bcmgenet_get_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_gset(priv->phydev, cmd);
+}
+
+static int bcmgenet_set_settings(struct net_device *dev,
+ struct ethtool_cmd *cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ if (!priv->phydev)
+ return -ENODEV;
+
+ return phy_ethtool_sset(priv->phydev, cmd);
+}
+
+static int bcmgenet_set_rx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 rbuf_chk_ctrl;
+ bool rx_csum_en;
+
+ rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+
+ rbuf_chk_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CHK_CTRL);
+
+ /* enable rx checksumming */
+ if (rx_csum_en)
+ rbuf_chk_ctrl |= RBUF_RXCHK_EN;
+ else
+ rbuf_chk_ctrl &= ~RBUF_RXCHK_EN;
+ priv->desc_rxchk_en = rx_csum_en;
+ bcmgenet_rbuf_writel(priv, rbuf_chk_ctrl, RBUF_CHK_CTRL);
+
+ return 0;
+}
+
+static int bcmgenet_set_tx_csum(struct net_device *dev,
+ netdev_features_t wanted)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ bool desc_64b_en;
+ u32 tbuf_ctrl, rbuf_ctrl;
+
+ tbuf_ctrl = bcmgenet_tbuf_ctrl_get(priv);
+ rbuf_ctrl = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
+
+ desc_64b_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
+
+ /* enable 64 bytes descriptor in both directions (RBUF and TBUF) */
+ if (desc_64b_en) {
+ tbuf_ctrl |= RBUF_64B_EN;
+ rbuf_ctrl |= RBUF_64B_EN;
+ } else {
+ tbuf_ctrl &= ~RBUF_64B_EN;
+ rbuf_ctrl &= ~RBUF_64B_EN;
+ }
+ priv->desc_64b_en = desc_64b_en;
+
+ bcmgenet_tbuf_ctrl_set(priv, tbuf_ctrl);
+ bcmgenet_rbuf_writel(priv, rbuf_ctrl, RBUF_CTRL);
+
+ return 0;
+}
+
+static int bcmgenet_set_features(struct net_device *dev,
+ netdev_features_t features)
+{
+ netdev_features_t changed = features ^ dev->features;
+ netdev_features_t wanted = dev->wanted_features;
+ int ret = 0;
+
+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
+ ret = bcmgenet_set_tx_csum(dev, wanted);
+ if (changed & (NETIF_F_RXCSUM))
+ ret = bcmgenet_set_rx_csum(dev, wanted);
+
+ return ret;
+}
+
+static u32 bcmgenet_get_msglevel(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ return priv->msg_enable;
+}
+
+static void bcmgenet_set_msglevel(struct net_device *dev, u32 level)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ priv->msg_enable = level;
+}
+
+/* standard ethtool support functions. */
+enum bcmgenet_stat_type {
+ BCMGENET_STAT_NETDEV = -1,
+ BCMGENET_STAT_MIB_RX,
+ BCMGENET_STAT_MIB_TX,
+ BCMGENET_STAT_RUNT,
+ BCMGENET_STAT_MISC,
+};
+
+struct bcmgenet_stats {
+ char stat_string[ETH_GSTRING_LEN];
+ int stat_sizeof;
+ int stat_offset;
+ enum bcmgenet_stat_type type;
+ /* reg offset from UMAC base for misc counters */
+ u16 reg_offset;
+};
+
+#define STAT_NETDEV(m) { \
+ .stat_string = __stringify(m), \
+ .stat_sizeof = sizeof(((struct net_device_stats *)0)->m), \
+ .stat_offset = offsetof(struct net_device_stats, m), \
+ .type = BCMGENET_STAT_NETDEV, \
+}
+
+#define STAT_GENET_MIB(str, m, _type) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, m), \
+ .type = _type, \
+}
+
+#define STAT_GENET_MIB_RX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_RX)
+#define STAT_GENET_MIB_TX(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_MIB_TX)
+#define STAT_GENET_RUNT(str, m) STAT_GENET_MIB(str, m, BCMGENET_STAT_RUNT)
+
+#define STAT_GENET_MISC(str, m, offset) { \
+ .stat_string = str, \
+ .stat_sizeof = sizeof(((struct bcmgenet_priv *)0)->m), \
+ .stat_offset = offsetof(struct bcmgenet_priv, m), \
+ .type = BCMGENET_STAT_MISC, \
+ .reg_offset = offset, \
+}
+
+
+/* There is a 0xC gap between the end of RX and beginning of TX stats and then
+ * between the end of TX stats and the beginning of the RX RUNT
+ */
+#define BCMGENET_STAT_OFFSET 0xc
+
+/* Hardware counters must be kept in sync because the order/offset
+ * is important here (order in structure declaration = order in hardware)
+ */
+static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
+ /* general stats */
+ STAT_NETDEV(rx_packets),
+ STAT_NETDEV(tx_packets),
+ STAT_NETDEV(rx_bytes),
+ STAT_NETDEV(tx_bytes),
+ STAT_NETDEV(rx_errors),
+ STAT_NETDEV(tx_errors),
+ STAT_NETDEV(rx_dropped),
+ STAT_NETDEV(tx_dropped),
+ STAT_NETDEV(multicast),
+ /* UniMAC RSV counters */
+ STAT_GENET_MIB_RX("rx_64_octets", mib.rx.pkt_cnt.cnt_64),
+ STAT_GENET_MIB_RX("rx_65_127_oct", mib.rx.pkt_cnt.cnt_127),
+ STAT_GENET_MIB_RX("rx_128_255_oct", mib.rx.pkt_cnt.cnt_255),
+ STAT_GENET_MIB_RX("rx_256_511_oct", mib.rx.pkt_cnt.cnt_511),
+ STAT_GENET_MIB_RX("rx_512_1023_oct", mib.rx.pkt_cnt.cnt_1023),
+ STAT_GENET_MIB_RX("rx_1024_1518_oct", mib.rx.pkt_cnt.cnt_1518),
+ STAT_GENET_MIB_RX("rx_vlan_1519_1522_oct", mib.rx.pkt_cnt.cnt_mgv),
+ STAT_GENET_MIB_RX("rx_1522_2047_oct", mib.rx.pkt_cnt.cnt_2047),
+ STAT_GENET_MIB_RX("rx_2048_4095_oct", mib.rx.pkt_cnt.cnt_4095),
+ STAT_GENET_MIB_RX("rx_4096_9216_oct", mib.rx.pkt_cnt.cnt_9216),
+ STAT_GENET_MIB_RX("rx_pkts", mib.rx.pkt),
+ STAT_GENET_MIB_RX("rx_bytes", mib.rx.bytes),
+ STAT_GENET_MIB_RX("rx_multicast", mib.rx.mca),
+ STAT_GENET_MIB_RX("rx_broadcast", mib.rx.bca),
+ STAT_GENET_MIB_RX("rx_fcs", mib.rx.fcs),
+ STAT_GENET_MIB_RX("rx_control", mib.rx.cf),
+ STAT_GENET_MIB_RX("rx_pause", mib.rx.pf),
+ STAT_GENET_MIB_RX("rx_unknown", mib.rx.uo),
+ STAT_GENET_MIB_RX("rx_align", mib.rx.aln),
+ STAT_GENET_MIB_RX("rx_outrange", mib.rx.flr),
+ STAT_GENET_MIB_RX("rx_code", mib.rx.cde),
+ STAT_GENET_MIB_RX("rx_carrier", mib.rx.fcr),
+ STAT_GENET_MIB_RX("rx_oversize", mib.rx.ovr),
+ STAT_GENET_MIB_RX("rx_jabber", mib.rx.jbr),
+ STAT_GENET_MIB_RX("rx_mtu_err", mib.rx.mtue),
+ STAT_GENET_MIB_RX("rx_good_pkts", mib.rx.pok),
+ STAT_GENET_MIB_RX("rx_unicast", mib.rx.uc),
+ STAT_GENET_MIB_RX("rx_ppp", mib.rx.ppp),
+ STAT_GENET_MIB_RX("rx_crc", mib.rx.rcrc),
+ /* UniMAC TSV counters */
+ STAT_GENET_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64),
+ STAT_GENET_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127),
+ STAT_GENET_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255),
+ STAT_GENET_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511),
+ STAT_GENET_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023),
+ STAT_GENET_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518),
+ STAT_GENET_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv),
+ STAT_GENET_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047),
+ STAT_GENET_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095),
+ STAT_GENET_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216),
+ STAT_GENET_MIB_TX("tx_pkts", mib.tx.pkts),
+ STAT_GENET_MIB_TX("tx_multicast", mib.tx.mca),
+ STAT_GENET_MIB_TX("tx_broadcast", mib.tx.bca),
+ STAT_GENET_MIB_TX("tx_pause", mib.tx.pf),
+ STAT_GENET_MIB_TX("tx_control", mib.tx.cf),
+ STAT_GENET_MIB_TX("tx_fcs_err", mib.tx.fcs),
+ STAT_GENET_MIB_TX("tx_oversize", mib.tx.ovr),
+ STAT_GENET_MIB_TX("tx_defer", mib.tx.drf),
+ STAT_GENET_MIB_TX("tx_excess_defer", mib.tx.edf),
+ STAT_GENET_MIB_TX("tx_single_col", mib.tx.scl),
+ STAT_GENET_MIB_TX("tx_multi_col", mib.tx.mcl),
+ STAT_GENET_MIB_TX("tx_late_col", mib.tx.lcl),
+ STAT_GENET_MIB_TX("tx_excess_col", mib.tx.ecl),
+ STAT_GENET_MIB_TX("tx_frags", mib.tx.frg),
+ STAT_GENET_MIB_TX("tx_total_col", mib.tx.ncl),
+ STAT_GENET_MIB_TX("tx_jabber", mib.tx.jbr),
+ STAT_GENET_MIB_TX("tx_bytes", mib.tx.bytes),
+ STAT_GENET_MIB_TX("tx_good_pkts", mib.tx.pok),
+ STAT_GENET_MIB_TX("tx_unicast", mib.tx.uc),
+ /* UniMAC RUNT counters */
+ STAT_GENET_RUNT("rx_runt_pkts", mib.rx_runt_cnt),
+ STAT_GENET_RUNT("rx_runt_valid_fcs", mib.rx_runt_fcs),
+ STAT_GENET_RUNT("rx_runt_inval_fcs_align", mib.rx_runt_fcs_align),
+ STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
+ /* Misc UniMAC counters */
+ STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
+ UMAC_RBUF_OVFL_CNT),
+ STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
+ STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
+};
+
+#define BCMGENET_STATS_LEN ARRAY_SIZE(bcmgenet_gstrings_stats)
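
The STAT_* macros rely on __stringify()/offsetof() so that a single table drives both the ethtool string set and the value extraction, instead of hand-written per-counter code. A self-contained sketch of that table-driven pattern (struct priv_sketch and its counters are illustrative, not the driver's layout):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct priv_sketch { uint32_t rx_pkts, rx_bytes, tx_pkts; };

struct stat_desc {
	const char *name;
	size_t offset;		/* offsetof() into struct priv_sketch */
};

#define STAT(m) { .name = #m, .offset = offsetof(struct priv_sketch, m) }

static const struct stat_desc stats[] = {
	STAT(rx_pkts), STAT(rx_bytes), STAT(tx_pkts),
};

int main(void)
{
	struct priv_sketch p = { .rx_pkts = 7, .rx_bytes = 1024, .tx_pkts = 3 };

	/* Generic walk: name and value both come from the table, not from
	 * per-counter code, which is the point of the STAT_* macros above.
	 */
	for (size_t i = 0; i < sizeof(stats) / sizeof(stats[0]); i++) {
		const char *base = (const char *)&p;
		uint32_t val = *(const uint32_t *)(base + stats[i].offset);

		printf("%-10s %u\n", stats[i].name, (unsigned int)val);
	}
	return 0;
}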
+
+static void bcmgenet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
+ strlcpy(info->version, "v2.0", sizeof(info->version));
+ info->n_stats = BCMGENET_STATS_LEN;
+}
+
+static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
+{
+ switch (string_set) {
+ case ETH_SS_STATS:
+ return BCMGENET_STATS_LEN;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void bcmgenet_get_strings(struct net_device *dev,
+ u32 stringset, u8 *data)
+{
+ int i;
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+ memcpy(data + i * ETH_GSTRING_LEN,
+ bcmgenet_gstrings_stats[i].stat_string,
+ ETH_GSTRING_LEN);
+ }
+ break;
+ }
+}
+
+static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
+{
+ int i, j = 0;
+
+ for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+ const struct bcmgenet_stats *s;
+ u8 offset = 0;
+ u32 val = 0;
+ char *p;
+
+ s = &bcmgenet_gstrings_stats[i];
+ switch (s->type) {
+ case BCMGENET_STAT_NETDEV:
+ continue;
+ case BCMGENET_STAT_MIB_RX:
+ case BCMGENET_STAT_MIB_TX:
+ case BCMGENET_STAT_RUNT:
+ if (s->type != BCMGENET_STAT_MIB_RX)
+ offset = BCMGENET_STAT_OFFSET;
+ val = bcmgenet_umac_readl(priv, UMAC_MIB_START +
+ j + offset);
+ break;
+ case BCMGENET_STAT_MISC:
+ val = bcmgenet_umac_readl(priv, s->reg_offset);
+ /* clear if overflowed */
+ if (val == ~0)
+ bcmgenet_umac_writel(priv, 0, s->reg_offset);
+ break;
+ }
+
+ j += s->stat_sizeof;
+ p = (char *)priv + s->stat_offset;
+ *(u32 *)p = val;
+ }
+}
+
+static void bcmgenet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats,
+ u64 *data)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (netif_running(dev))
+ bcmgenet_update_mib_counters(priv);
+
+ for (i = 0; i < BCMGENET_STATS_LEN; i++) {
+ const struct bcmgenet_stats *s;
+ char *p;
+
+ s = &bcmgenet_gstrings_stats[i];
+ if (s->type == BCMGENET_STAT_NETDEV)
+ p = (char *)&dev->stats;
+ else
+ p = (char *)priv;
+ p += s->stat_offset;
+ data[i] = *(u32 *)p;
+ }
+}
+
+/* standard ethtool support functions. */
+static struct ethtool_ops bcmgenet_ethtool_ops = {
+ .get_strings = bcmgenet_get_strings,
+ .get_sset_count = bcmgenet_get_sset_count,
+ .get_ethtool_stats = bcmgenet_get_ethtool_stats,
+ .get_settings = bcmgenet_get_settings,
+ .set_settings = bcmgenet_set_settings,
+ .get_drvinfo = bcmgenet_get_drvinfo,
+ .get_link = ethtool_op_get_link,
+ .get_msglevel = bcmgenet_get_msglevel,
+ .set_msglevel = bcmgenet_set_msglevel,
+};
+
+/* Power down the unimac, based on mode. */
+static void bcmgenet_power_down(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
+{
+ u32 reg;
+
+ switch (mode) {
+ case GENET_POWER_CABLE_SENSE:
+ phy_detach(priv->phydev);
+ break;
+
+ case GENET_POWER_PASSIVE:
+ /* Power down LED */
+ bcmgenet_mii_reset(priv->dev);
+ if (priv->hw_params->flags & GENET_HAS_EXT) {
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+ reg |= (EXT_PWR_DOWN_PHY |
+ EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_BIAS);
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void bcmgenet_power_up(struct bcmgenet_priv *priv,
+ enum bcmgenet_power_mode mode)
+{
+ u32 reg;
+
+ if (!(priv->hw_params->flags & GENET_HAS_EXT))
+ return;
+
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+
+ switch (mode) {
+ case GENET_POWER_PASSIVE:
+ reg &= ~(EXT_PWR_DOWN_DLL | EXT_PWR_DOWN_PHY |
+ EXT_PWR_DOWN_BIAS);
+ /* fallthrough */
+ case GENET_POWER_CABLE_SENSE:
+ /* enable APD */
+ reg |= EXT_PWR_DN_EN_LD;
+ break;
+ default:
+ break;
+ }
+
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_mii_reset(priv->dev);
+}
+
+/* ioctl handle special commands that are not present in ethtool. */
+static int bcmgenet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int val = 0;
+
+ if (!netif_running(dev))
+ return -EINVAL;
+
+ switch (cmd) {
+ case SIOCGMIIPHY:
+ case SIOCGMIIREG:
+ case SIOCSMIIREG:
+ if (!priv->phydev)
+ val = -ENODEV;
+ else
+ val = phy_mii_ioctl(priv->phydev, rq, cmd);
+ break;
+
+ default:
+ val = -EINVAL;
+ break;
+ }
+
+ return val;
+}
+
+static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct enet_cb *tx_cb_ptr;
+
+ tx_cb_ptr = ring->cbs;
+ tx_cb_ptr += ring->write_ptr - ring->cb_ptr;
+ tx_cb_ptr->bd_addr = priv->tx_bds + ring->write_ptr * DMA_DESC_SIZE;
+ /* Advancing local write pointer */
+ if (ring->write_ptr == ring->end_ptr)
+ ring->write_ptr = ring->cb_ptr;
+ else
+ ring->write_ptr++;
+
+ return tx_cb_ptr;
+}
+
+/* Simple helper to free a control block's resources */
+static void bcmgenet_free_cb(struct enet_cb *cb)
+{
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ dma_unmap_addr_set(cb, dma_addr, 0);
+}
+
+static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+ INTRL2_CPU_MASK_SET);
+}
+
+static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+ INTRL2_CPU_MASK_CLEAR);
+}
+
+static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_1_writel(priv,
+ (1 << ring->index), INTRL2_CPU_MASK_CLEAR);
+ priv->int1_mask &= ~(1 << ring->index);
+}
+
+static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *ring)
+{
+ bcmgenet_intrl2_1_writel(priv,
+ (1 << ring->index), INTRL2_CPU_MASK_SET);
+ priv->int1_mask |= (1 << ring->index);
+}
+
+/* Unlocked version of the reclaim routine */
+static void __bcmgenet_tx_reclaim(struct net_device *dev,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int last_tx_cn, last_c_index, num_tx_bds;
+ struct enet_cb *tx_cb_ptr;
+ unsigned int c_index;
+
+ /* Compute how many buffers are transmitted since last xmit call */
+ c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
+
+ last_c_index = ring->c_index;
+ num_tx_bds = ring->size;
+
+ c_index &= (num_tx_bds - 1);
+
+ if (c_index >= last_c_index)
+ last_tx_cn = c_index - last_c_index;
+ else
+ last_tx_cn = num_tx_bds - last_c_index + c_index;
+
+ netif_dbg(priv, tx_done, dev,
+ "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
+ __func__, ring->index,
+ c_index, last_tx_cn, last_c_index);
+
+ /* Reclaim transmitted buffers */
+ while (last_tx_cn-- > 0) {
+ tx_cb_ptr = ring->cbs + last_c_index;
+ if (tx_cb_ptr->skb) {
+ dev->stats.tx_bytes += tx_cb_ptr->skb->len;
+ dma_unmap_single(&dev->dev,
+ dma_unmap_addr(tx_cb_ptr, dma_addr),
+ tx_cb_ptr->skb->len,
+ DMA_TO_DEVICE);
+ bcmgenet_free_cb(tx_cb_ptr);
+ } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
+ dev->stats.tx_bytes +=
+ dma_unmap_len(tx_cb_ptr, dma_len);
+ dma_unmap_page(&dev->dev,
+ dma_unmap_addr(tx_cb_ptr, dma_addr),
+ dma_unmap_len(tx_cb_ptr, dma_len),
+ DMA_TO_DEVICE);
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
+ }
+ dev->stats.tx_packets++;
+ ring->free_bds += 1;
+
+ last_c_index++;
+ last_c_index &= (num_tx_bds - 1);
+ }
+
+ if (ring->free_bds > (MAX_SKB_FRAGS + 1))
+ ring->int_disable(priv, ring);
+
+ if (__netif_subqueue_stopped(dev, ring->queue))
+ netif_wake_subqueue(dev, ring->queue);
+
+ ring->c_index = c_index;
+}
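
The reclaim loop derives the number of completed descriptors from the difference between the hardware consumer index and the last value seen, masked to the power-of-two ring size so wraparound is handled without tracking overflow of the raw index. The same arithmetic in isolation:

#include <stdio.h>

/* Completed-descriptor count between two consumer index samples, with the
 * same power-of-two wraparound handling as the reclaim loop above.
 */
static unsigned int tx_done(unsigned int c_index, unsigned int last_c_index,
			    unsigned int num_tx_bds)
{
	c_index &= (num_tx_bds - 1);	/* num_tx_bds is a power of two */
	if (c_index >= last_c_index)
		return c_index - last_c_index;
	return num_tx_bds - last_c_index + c_index;
}

int main(void)
{
	printf("%u\n", tx_done(5, 250, 256));	/* wrapped: 11 completions */
	printf("%u\n", tx_done(20, 10, 256));	/* plain:   10 completions */
	return 0;
}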
+
+static void bcmgenet_tx_reclaim(struct net_device *dev,
+ struct bcmgenet_tx_ring *ring)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ __bcmgenet_tx_reclaim(dev, ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+}
+
+static void bcmgenet_tx_reclaim_all(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int i;
+
+ if (netif_is_multiqueue(dev)) {
+ for (i = 0; i < priv->hw_params->tx_queues; i++)
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[i]);
+ }
+
+ bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]);
+}
+
+/* Transmits a single SKB (either the head of a fragment list or a single SKB);
+ * the caller must hold priv->lock
+ */
+static int bcmgenet_xmit_single(struct net_device *dev,
+ struct sk_buff *skb,
+ u16 dma_desc_flags,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ struct enet_cb *tx_cb_ptr;
+ unsigned int skb_len;
+ dma_addr_t mapping;
+ u32 length_status;
+ int ret;
+
+ tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+ if (unlikely(!tx_cb_ptr))
+ BUG();
+
+ tx_cb_ptr->skb = skb;
+
+ skb_len = skb_headlen(skb) < ETH_ZLEN ? ETH_ZLEN : skb_headlen(skb);
+
+ mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ netif_err(priv, tx_err, dev, "Tx DMA map failed\n");
+ dev_kfree_skb(skb);
+ return ret;
+ }
+
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, skb->len);
+ length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) |
+ DMA_TX_APPEND_CRC;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL)
+ length_status |= DMA_TX_DO_CSUM;
+
+ dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status);
+
+ /* Decrement total BD count and advance our write pointer */
+ ring->free_bds -= 1;
+ ring->prod_index += 1;
+ ring->prod_index &= DMA_P_INDEX_MASK;
+
+ return 0;
+}
+
+/* Transmit an SKB fragment */
+static int bcmgenet_xmit_frag(struct net_device *dev,
+ skb_frag_t *frag,
+ u16 dma_desc_flags,
+ struct bcmgenet_tx_ring *ring)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct device *kdev = &priv->pdev->dev;
+ struct enet_cb *tx_cb_ptr;
+ dma_addr_t mapping;
+ int ret;
+
+ tx_cb_ptr = bcmgenet_get_txcb(priv, ring);
+
+ if (unlikely(!tx_cb_ptr))
+ BUG();
+ tx_cb_ptr->skb = NULL;
+
+ mapping = skb_frag_dma_map(kdev, frag, 0,
+ skb_frag_size(frag), DMA_TO_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
+ __func__);
+ return ret;
+ }
+
+ dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping);
+ dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
+
+ dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
+ (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+ (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
+
+
+ ring->free_bds -= 1;
+ ring->prod_index += 1;
+ ring->prod_index &= DMA_P_INDEX_MASK;
+
+ return 0;
+}
+
+/* Reallocate the SKB to put enough headroom in front of it and insert
+ * the transmit checksum offsets in the descriptors
+ */
+static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
+{
+ struct status_64 *status = NULL;
+ struct sk_buff *new_skb;
+ u16 offset;
+ u8 ip_proto;
+ u16 ip_ver;
+ u32 tx_csum_info;
+
+ if (unlikely(skb_headroom(skb) < sizeof(*status))) {
+ /* If the 64 byte status block is enabled, we must make sure the skb
+ * has enough headroom for us to insert the 64B status block.
+ */
+ new_skb = skb_realloc_headroom(skb, sizeof(*status));
+ dev_kfree_skb(skb);
+ if (!new_skb) {
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ return -ENOMEM;
+ }
+ skb = new_skb;
+ }
+
+ skb_push(skb, sizeof(*status));
+ status = (struct status_64 *)skb->data;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ ip_ver = htons(skb->protocol);
+ switch (ip_ver) {
+ case ETH_P_IP:
+ ip_proto = ip_hdr(skb)->protocol;
+ break;
+ case ETH_P_IPV6:
+ ip_proto = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return 0;
+ }
+
+ offset = skb_checksum_start_offset(skb) - sizeof(*status);
+ tx_csum_info = (offset << STATUS_TX_CSUM_START_SHIFT) |
+ (offset + skb->csum_offset);
+
+ /* Set the length valid bit for TCP and UDP and just set
+ * the special UDP flag for IPv4, else just set to 0.
+ */
+ if (ip_proto == IPPROTO_TCP || ip_proto == IPPROTO_UDP) {
+ tx_csum_info |= STATUS_TX_CSUM_LV;
+ if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
+ tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
+ } else
+ tx_csum_info = 0;
+
+ status->tx_csum_info = tx_csum_info;
+ }
+
+ return 0;
+}
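
bcmgenet_put_tx_csum() packs two offsets into the tx_csum_info word of the 64B status block: where checksumming starts, and where the result must be written back (start + csum_offset). The sketch below only illustrates that packing; the real field widths and STATUS_TX_CSUM_* shifts live in bcmgenet.h, and the 16-bit split here is assumed purely for illustration:

#include <stdint.h>
#include <stdio.h>

#define CSUM_START_SHIFT	16	/* assumed width, not the real macro */

static uint32_t pack_tx_csum(uint16_t csum_start, uint16_t csum_offset)
{
	uint16_t start = csum_start;			/* checksumming begins here */
	uint16_t store = csum_start + csum_offset;	/* result is stored here */

	return ((uint32_t)start << CSUM_START_SHIFT) | store;
}

int main(void)
{
	/* e.g. TCP over IPv4: checksum starts at the TCP header (offset 34
	 * in the frame) and is stored 16 bytes into that header.
	 */
	printf("0x%08x\n", pack_tx_csum(34, 16));
	return 0;
}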
+
+static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct bcmgenet_tx_ring *ring = NULL;
+ unsigned long flags = 0;
+ int nr_frags, index;
+ u16 dma_desc_flags;
+ int ret;
+ int i;
+
+ index = skb_get_queue_mapping(skb);
+ /* Mapping strategy:
+ * queue_mapping = 0, unclassified, packet transmitted through ring 16
+ * queue_mapping = 1, goes to ring 0. (highest priority queue)
+ * queue_mapping = 2, goes to ring 1.
+ * queue_mapping = 3, goes to ring 2.
+ * queue_mapping = 4, goes to ring 3.
+ */
+ if (index == 0)
+ index = DESC_INDEX;
+ else
+ index -= 1;
+
+ if ((index != DESC_INDEX) && (index > priv->hw_params->tx_queues - 1)) {
+ netdev_err(dev, "%s: queue_mapping %d is invalid\n",
+ __func__, skb_get_queue_mapping(skb));
+ dev->stats.tx_errors++;
+ dev->stats.tx_dropped++;
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ nr_frags = skb_shinfo(skb)->nr_frags;
+ ring = &priv->tx_rings[index];
+
+ spin_lock_irqsave(&ring->lock, flags);
+ if (ring->free_bds <= nr_frags + 1) {
+ netif_stop_subqueue(dev, ring->queue);
+ netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
+ __func__, index, ring->queue);
+ ret = NETDEV_TX_BUSY;
+ goto out;
+ }
+
+ /* set the SKB transmit checksum */
+ if (priv->desc_64b_en) {
+ ret = bcmgenet_put_tx_csum(dev, skb);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ dma_desc_flags = DMA_SOP;
+ if (nr_frags == 0)
+ dma_desc_flags |= DMA_EOP;
+
+ /* Transmit single SKB or head of fragment list */
+ ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+
+ /* xmit fragment */
+ for (i = 0; i < nr_frags; i++) {
+ ret = bcmgenet_xmit_frag(dev,
+ &skb_shinfo(skb)->frags[i],
+ (i == nr_frags - 1) ? DMA_EOP : 0, ring);
+ if (ret) {
+ ret = NETDEV_TX_OK;
+ goto out;
+ }
+ }
+
+ /* we kept a software copy of how much we should advance the TDMA
+ * producer index, now write it down to the hardware
+ */
+ bcmgenet_tdma_ring_writel(priv, ring->index,
+ ring->prod_index, TDMA_PROD_INDEX);
+
+ if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
+ netif_stop_subqueue(dev, ring->queue);
+ ring->int_enable(priv, ring);
+ }
+
+out:
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ return ret;
+}
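
The queue selection at the top of bcmgenet_xmit() folds skb->queue_mapping into a ring index: 0 maps to the default descriptor ring (16) and 1..4 map to the priority rings 0..3. Reduced to a helper:

#include <stdio.h>

#define DESC_INDEX	16	/* default/unclassified ring, as above */

/* queue_mapping 0 -> ring 16, 1..N -> rings 0..N-1 */
static int map_queue_to_ring(int queue_mapping)
{
	return queue_mapping == 0 ? DESC_INDEX : queue_mapping - 1;
}

int main(void)
{
	for (int q = 0; q <= 4; q++)
		printf("queue_mapping %d -> ring %d\n", q,
		       map_queue_to_ring(q));
	return 0;
}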
+
+
+static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
+ struct enet_cb *cb)
+{
+ struct device *kdev = &priv->pdev->dev;
+ struct sk_buff *skb;
+ dma_addr_t mapping;
+ int ret;
+
+ skb = netdev_alloc_skb(priv->dev,
+ priv->rx_buf_len + SKB_ALIGNMENT);
+ if (!skb)
+ return -ENOMEM;
+
+ /* a caller did not release this control block */
+ WARN_ON(cb->skb != NULL);
+ cb->skb = skb;
+ mapping = dma_map_single(kdev, skb->data,
+ priv->rx_buf_len, DMA_FROM_DEVICE);
+ ret = dma_mapping_error(kdev, mapping);
+ if (ret) {
+ bcmgenet_free_cb(cb);
+ netif_err(priv, rx_err, priv->dev,
+ "%s DMA map failed\n", __func__);
+ return ret;
+ }
+
+ dma_unmap_addr_set(cb, dma_addr, mapping);
+ /* assign packet, prepare descriptor, and advance pointer */
+
+ dmadesc_set_addr(priv, priv->rx_bd_assign_ptr, mapping);
+
+ /* turn on the newly assigned BD for DMA to use */
+ priv->rx_bd_assign_index++;
+ priv->rx_bd_assign_index &= (priv->num_rx_bds - 1);
+
+ priv->rx_bd_assign_ptr = priv->rx_bds +
+ (priv->rx_bd_assign_index * DMA_DESC_SIZE);
+
+ return 0;
+}
+
+/* bcmgenet_desc_rx - descriptor based rx process.
+ * This can be called from the bottom half or from the NAPI polling method.
+ */
+static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
+ unsigned int budget)
+{
+ struct net_device *dev = priv->dev;
+ struct enet_cb *cb;
+ struct sk_buff *skb;
+ u32 dma_length_status;
+ unsigned long dma_flag;
+ int len, err;
+ unsigned int rxpktprocessed = 0, rxpkttoprocess;
+ unsigned int p_index;
+ unsigned int chksum_ok = 0;
+
+ p_index = bcmgenet_rdma_ring_readl(priv,
+ DESC_INDEX, RDMA_PROD_INDEX);
+ p_index &= DMA_P_INDEX_MASK;
+
+ if (p_index < priv->rx_c_index)
+ rxpkttoprocess = (DMA_C_INDEX_MASK + 1) -
+ priv->rx_c_index + p_index;
+ else
+ rxpkttoprocess = p_index - priv->rx_c_index;
+
+ netif_dbg(priv, rx_status, dev,
+ "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
+
+ while ((rxpktprocessed < rxpkttoprocess) &&
+ (rxpktprocessed < budget)) {
+
+ /* Unmap the packet contents such that we can use the
+ * RSV from the 64 byte descriptor when enabled and save
+ * a 32-bit register read
+ */
+ cb = &priv->rx_cbs[priv->rx_read_ptr];
+ skb = cb->skb;
+ dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
+ priv->rx_buf_len, DMA_FROM_DEVICE);
+
+ if (!priv->desc_64b_en) {
+ dma_length_status = dmadesc_get_length_status(priv,
+ priv->rx_bds +
+ (priv->rx_read_ptr *
+ DMA_DESC_SIZE));
+ } else {
+ struct status_64 *status;
+ status = (struct status_64 *)skb->data;
+ dma_length_status = status->length_status;
+ }
+
+ /* DMA flags and length are still valid no matter how
+ * we got the Receive Status Vector (64B RSB or register)
+ */
+ dma_flag = dma_length_status & 0xffff;
+ len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
+
+ netif_dbg(priv, rx_status, dev,
+ "%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
+ __func__, p_index, priv->rx_c_index, priv->rx_read_ptr,
+ dma_length_status);
+
+ rxpktprocessed++;
+
+ priv->rx_read_ptr++;
+ priv->rx_read_ptr &= (priv->num_rx_bds - 1);
+
+ /* out of memory, just drop packets at the hardware level */
+ if (unlikely(!skb)) {
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+ goto refill;
+ }
+
+ if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
+ netif_err(priv, rx_status, dev,
+ "Dropping fragmented packet!\n");
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ goto refill;
+ }
+ /* report errors */
+ if (unlikely(dma_flag & (DMA_RX_CRC_ERROR |
+ DMA_RX_OV |
+ DMA_RX_NO |
+ DMA_RX_LG |
+ DMA_RX_RXER))) {
+ netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
+ (unsigned int)dma_flag);
+ if (dma_flag & DMA_RX_CRC_ERROR)
+ dev->stats.rx_crc_errors++;
+ if (dma_flag & DMA_RX_OV)
+ dev->stats.rx_over_errors++;
+ if (dma_flag & DMA_RX_NO)
+ dev->stats.rx_frame_errors++;
+ if (dma_flag & DMA_RX_LG)
+ dev->stats.rx_length_errors++;
+ dev->stats.rx_dropped++;
+ dev->stats.rx_errors++;
+
+ /* discard the packet and advance consumer index.*/
+ dev_kfree_skb_any(cb->skb);
+ cb->skb = NULL;
+ goto refill;
+ } /* error packet */
+
+ chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
+ priv->desc_rxchk_en;
+
+ skb_put(skb, len);
+ if (priv->desc_64b_en) {
+ skb_pull(skb, 64);
+ len -= 64;
+ }
+
+ if (likely(chksum_ok))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* remove the 2 bytes added by hardware for IP alignment */
+ skb_pull(skb, 2);
+ len -= 2;
+
+ if (priv->crc_fwd_en) {
+ skb_trim(skb, len - ETH_FCS_LEN);
+ len -= ETH_FCS_LEN;
+ }
+
+ /* Finish setting up the received SKB and send it to the kernel */
+ skb->protocol = eth_type_trans(skb, priv->dev);
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += len;
+ if (dma_flag & DMA_RX_MULT)
+ dev->stats.multicast++;
+
+ /* Notify kernel */
+ napi_gro_receive(&priv->napi, skb);
+ cb->skb = NULL;
+ netif_dbg(priv, rx_status, dev, "pushed up to kernel\n");
+
+ /* refill RX path on the current control block */
+refill:
+ err = bcmgenet_rx_refill(priv, cb);
+ if (err)
+ netif_err(priv, rx_err, dev, "Rx refill failed\n");
+ }
+
+ return rxpktprocessed;
+}
+
+/* Assign skb to RX DMA descriptor. */
+static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
+{
+ struct enet_cb *cb;
+ int ret = 0;
+ int i;
+
+ netif_dbg(priv, hw, priv->dev, "%s:\n", __func__);
+
+ /* loop here for each buffer needing assignment */
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[priv->rx_bd_assign_index];
+ if (cb->skb)
+ continue;
+
+ /* set the DMA descriptor length once and for all;
+ * it would only change if we supported dynamically sizing
+ * priv->rx_buf_len, which we do not
+ */
+ dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
+ priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
+
+ ret = bcmgenet_rx_refill(priv, cb);
+ if (ret)
+ break;
+
+ }
+
+ return ret;
+}
+
+static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
+{
+ struct enet_cb *cb;
+ int i;
+
+ for (i = 0; i < priv->num_rx_bds; i++) {
+ cb = &priv->rx_cbs[i];
+
+ if (dma_unmap_addr(cb, dma_addr)) {
+ dma_unmap_single(&priv->dev->dev,
+ dma_unmap_addr(cb, dma_addr),
+ priv->rx_buf_len, DMA_FROM_DEVICE);
+ dma_unmap_addr_set(cb, dma_addr, 0);
+ }
+
+ if (cb->skb)
+ bcmgenet_free_cb(cb);
+ }
+}
+
+static int reset_umac(struct bcmgenet_priv *priv)
+{
+ struct device *kdev = &priv->pdev->dev;
+ unsigned int timeout = 0;
+ u32 reg;
+
+ /* 7358a0/7552a0: bad default in RBUF_FLUSH_CTRL.umac_sw_rst */
+ bcmgenet_rbuf_ctrl_set(priv, 0);
+ udelay(10);
+
+ /* disable MAC while updating its registers */
+ bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+
+ /* issue soft reset, wait for it to complete */
+ bcmgenet_umac_writel(priv, CMD_SW_RESET, UMAC_CMD);
+ while (timeout++ < 1000) {
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (!(reg & CMD_SW_RESET))
+ return 0;
+
+ udelay(1);
+ }
+
+ if (timeout == 1000) {
+ dev_err(kdev,
+ "timeout waiting for MAC to come out of reset\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int init_umac(struct bcmgenet_priv *priv)
+{
+ struct device *kdev = &priv->pdev->dev;
+ int ret;
+ u32 reg, cpu_mask_clear;
+
+ dev_dbg(&priv->pdev->dev, "bcmgenet: init_umac\n");
+
+ ret = reset_umac(priv);
+ if (ret)
+ return ret;
+
+ bcmgenet_umac_writel(priv, 0, UMAC_CMD);
+ /* clear tx/rx counter */
+ bcmgenet_umac_writel(priv,
+ MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL);
+ bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
+
+ bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+ /* init rx registers, enable ip header optimization */
+ reg = bcmgenet_rbuf_readl(priv, RBUF_CTRL);
+ reg |= RBUF_ALIGN_2B;
+ bcmgenet_rbuf_writel(priv, reg, RBUF_CTRL);
+
+ if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
+ bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
+
+ /* Mask all interrupts.*/
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+ bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
+ bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+
+ cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
+
+ dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
+
+ /* Monitor cable plug/unplug events for internal PHY */
+ if (phy_is_internal(priv->phydev))
+ cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+ else if (priv->ext_phy)
+ cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
+ else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ reg = bcmgenet_bp_mc_get(priv);
+ reg |= BIT(priv->hw_params->bp_in_en_shift);
+
+ /* bp_mask: back pressure mask */
+ if (netif_is_multiqueue(priv->dev))
+ reg |= priv->hw_params->bp_in_mask;
+ else
+ reg &= ~priv->hw_params->bp_in_mask;
+ bcmgenet_bp_mc_set(priv, reg);
+ }
+
+ /* Enable MDIO interrupts on GENET v3+ */
+ if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
+ cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
+
+ bcmgenet_intrl2_0_writel(priv, cpu_mask_clear,
+ INTRL2_CPU_MASK_CLEAR);
+
+ /* Enable rx/tx engine.*/
+ dev_dbg(kdev, "done init umac\n");
+
+ return 0;
+}
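
init_umac() programs the INTRL2 block through separate MASK_SET and MASK_CLEAR registers, so individual interrupt sources can be masked or unmasked with a single write instead of a read-modify-write of one mask register. A toy model of that semantic (the bit positions here are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Writing a 1 to MASK_SET masks (disables) that source; writing a 1 to
 * MASK_CLEAR unmasks it.
 */
static uint32_t cpu_mask = 0xFFFFFFFF;	/* everything masked after reset */

static void mask_set(uint32_t bits)   { cpu_mask |= bits;  }
static void mask_clear(uint32_t bits) { cpu_mask &= ~bits; }

#define IRQ_RXDMA_BDONE	(1u << 13)	/* illustrative bit positions */
#define IRQ_LINK_UP	(1u << 4)

int main(void)
{
	mask_clear(IRQ_RXDMA_BDONE | IRQ_LINK_UP);	/* enable two sources */
	printf("mask now 0x%08x\n", cpu_mask);
	mask_set(IRQ_RXDMA_BDONE);			/* NAPI masks the RX irq */
	printf("mask now 0x%08x\n", cpu_mask);
	return 0;
}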
+
+/* Initialize all house-keeping variables for a TX ring, along
+ * with corresponding hardware registers
+ */
+static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
+ unsigned int index, unsigned int size,
+ unsigned int write_ptr, unsigned int end_ptr)
+{
+ struct bcmgenet_tx_ring *ring = &priv->tx_rings[index];
+ u32 words_per_bd = WORDS_PER_BD(priv);
+ u32 flow_period_val = 0;
+ unsigned int first_bd;
+
+ spin_lock_init(&ring->lock);
+ ring->index = index;
+ if (index == DESC_INDEX) {
+ ring->queue = 0;
+ ring->int_enable = bcmgenet_tx_ring16_int_enable;
+ ring->int_disable = bcmgenet_tx_ring16_int_disable;
+ } else {
+ ring->queue = index + 1;
+ ring->int_enable = bcmgenet_tx_ring_int_enable;
+ ring->int_disable = bcmgenet_tx_ring_int_disable;
+ }
+ ring->cbs = priv->tx_cbs + write_ptr;
+ ring->size = size;
+ ring->c_index = 0;
+ ring->free_bds = size;
+ ring->write_ptr = write_ptr;
+ ring->cb_ptr = write_ptr;
+ ring->end_ptr = end_ptr - 1;
+ ring->prod_index = 0;
+
+ /* Set flow period for ring != 16 */
+ if (index != DESC_INDEX)
+ flow_period_val = ENET_MAX_MTU_SIZE << 16;
+
+ bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_PROD_INDEX);
+ bcmgenet_tdma_ring_writel(priv, index, 0, TDMA_CONS_INDEX);
+ bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
+ /* Disable rate control for now */
+ bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
+ TDMA_FLOW_PERIOD);
+ /* Unclassified traffic goes to ring 16 */
+ bcmgenet_tdma_ring_writel(priv, index,
+ ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
+ DMA_RING_BUF_SIZE);
+
+ first_bd = write_ptr;
+
+ /* Set start and end address, read and write pointers */
+ bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+ DMA_START_ADDR);
+ bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
+ TDMA_READ_PTR);
+ bcmgenet_tdma_ring_writel(priv, index, first_bd,
+ TDMA_WRITE_PTR);
+ bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
+ DMA_END_ADDR);
+}
+
+/* Initialize a RDMA ring */
+static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
+ unsigned int index, unsigned int size)
+{
+ u32 words_per_bd = WORDS_PER_BD(priv);
+ int ret;
+
+ priv->num_rx_bds = TOTAL_DESC;
+ priv->rx_bds = priv->base + priv->hw_params->rdma_offset;
+ priv->rx_bd_assign_ptr = priv->rx_bds;
+ priv->rx_bd_assign_index = 0;
+ priv->rx_c_index = 0;
+ priv->rx_read_ptr = 0;
+ priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb),
+ GFP_KERNEL);
+ if (!priv->rx_cbs)
+ return -ENOMEM;
+
+ ret = bcmgenet_alloc_rx_buffers(priv);
+ if (ret) {
+ kfree(priv->rx_cbs);
+ return ret;
+ }
+
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_WRITE_PTR);
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
+ bcmgenet_rdma_ring_writel(priv, index,
+ ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
+ DMA_RING_BUF_SIZE);
+ bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
+ bcmgenet_rdma_ring_writel(priv, index,
+ words_per_bd * size - 1, DMA_END_ADDR);
+ bcmgenet_rdma_ring_writel(priv, index,
+ (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) |
+ DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
+ bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
+
+ return ret;
+}
+
+/* init multi xmit queues, only available for GENET2+
+ * the queue is partitioned as follows:
+ *
+ * queues 0 - 3 are priority based, each one has 32 descriptors,
+ * with queue 0 being the highest priority queue.
+ *
+ * queue 16 is the default tx queue with GENET_DEFAULT_BD_CNT
+ * descriptors: 256 - (number of tx queues * bds per queue) = 128
+ * descriptors.
+ *
+ * The transmit control block pool is then partitioned as following:
+ * - tx_cbs[0...127] are for queue 16
+ * - tx_ring_cbs[0] points to tx_cbs[128..159]
+ * - tx_ring_cbs[1] points to tx_cbs[160..191]
+ * - tx_ring_cbs[2] points to tx_cbs[192..223]
+ * - tx_ring_cbs[3] points to tx_cbs[224..255]
+ */
+static void bcmgenet_init_multiq(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned int i, dma_enable;
+ u32 reg, dma_ctrl, ring_cfg = 0, dma_priority = 0;
+
+ if (!netif_is_multiqueue(dev)) {
+ netdev_warn(dev, "called with non multi queue aware HW\n");
+ return;
+ }
+
+ dma_ctrl = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ dma_enable = dma_ctrl & DMA_EN;
+ dma_ctrl &= ~DMA_EN;
+ bcmgenet_tdma_writel(priv, dma_ctrl, DMA_CTRL);
+
+ /* Enable strict priority arbiter mode */
+ bcmgenet_tdma_writel(priv, DMA_ARBITER_SP, DMA_ARB_CTRL);
+
+ for (i = 0; i < priv->hw_params->tx_queues; i++) {
+ /* first 64 tx_cbs are reserved for default tx queue
+ * (ring 16)
+ */
+ bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
+ i * priv->hw_params->bds_cnt,
+ (i + 1) * priv->hw_params->bds_cnt);
+
+ /* Configure ring as descriptor ring and set up priority */
+ ring_cfg |= 1 << i;
+ dma_priority |= ((GENET_Q0_PRIORITY + i) <<
+ (GENET_MAX_MQ_CNT + 1) * i);
+ dma_ctrl |= 1 << (i + DMA_RING_BUF_EN_SHIFT);
+ }
+
+ /* Enable rings */
+ reg = bcmgenet_tdma_readl(priv, DMA_RING_CFG);
+ reg |= ring_cfg;
+ bcmgenet_tdma_writel(priv, reg, DMA_RING_CFG);
+
+ /* Use configured rings priority and set ring #16 priority */
+ reg = bcmgenet_tdma_readl(priv, DMA_RING_PRIORITY);
+ reg |= ((GENET_Q0_PRIORITY + priv->hw_params->tx_queues) << 20);
+ reg |= dma_priority;
+ bcmgenet_tdma_writel(priv, reg, DMA_PRIORITY);
+
+ /* Configure ring as descriptor ring and re-enable DMA if enabled */
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= dma_ctrl;
+ if (dma_enable)
+ reg |= DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+}
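
bcmgenet_init_multiq() builds its register values bit by bit: one enable bit per ring in ring_cfg and dma_ctrl, and a 5-bit priority field per ring in the priority register (GENET_MAX_MQ_CNT + 1 = 5), with the default ring (#16) taking the field at bit 20 (= 5 * 4). The worked arithmetic for four queues, as a standalone sketch:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int tx_queues = 4, q0_priority = 0;
	uint32_t ring_cfg = 0, dma_priority = 0;

	for (int i = 0; i < tx_queues; i++) {
		ring_cfg |= 1u << i;				/* enable ring i */
		dma_priority |= (uint32_t)(q0_priority + i) << (5 * i);
	}
	/* default ring (#16) gets the next priority value */
	dma_priority |= (uint32_t)(q0_priority + tx_queues) << 20;

	printf("ring_cfg     = 0x%08x\n", ring_cfg);	/* 0x0000000f */
	printf("dma_priority = 0x%08x\n", dma_priority);
	return 0;
}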
+
+static void bcmgenet_fini_dma(struct bcmgenet_priv *priv)
+{
+ int i;
+
+ /* disable DMA */
+ bcmgenet_rdma_writel(priv, 0, DMA_CTRL);
+ bcmgenet_tdma_writel(priv, 0, DMA_CTRL);
+
+ for (i = 0; i < priv->num_tx_bds; i++) {
+ if (priv->tx_cbs[i].skb != NULL) {
+ dev_kfree_skb(priv->tx_cbs[i].skb);
+ priv->tx_cbs[i].skb = NULL;
+ }
+ }
+
+ bcmgenet_free_rx_buffers(priv);
+ kfree(priv->rx_cbs);
+ kfree(priv->tx_cbs);
+}
+
+/* init_edma: Initialize DMA control register */
+static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
+{
+ int ret;
+
+ netif_dbg(priv, hw, priv->dev, "bcmgenet: init_edma\n");
+
+ /* by default, enable ring 16 (descriptor based) */
+ ret = bcmgenet_init_rx_ring(priv, DESC_INDEX, TOTAL_DESC);
+ if (ret) {
+ netdev_err(priv->dev, "failed to initialize RX ring\n");
+ return ret;
+ }
+
+ /* init rDma */
+ bcmgenet_rdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+ /* Init tDma */
+ bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
+
+ /* Initialize common TX ring structures */
+ priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
+ priv->num_tx_bds = TOTAL_DESC;
+ priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb),
+ GFP_KERNEL);
+ if (!priv->tx_cbs) {
+ bcmgenet_fini_dma(priv);
+ return -ENOMEM;
+ }
+
+ /* initialize multi xmit queue */
+ bcmgenet_init_multiq(priv->dev);
+
+ /* initialize special ring 16 */
+ bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
+ priv->hw_params->tx_queues * priv->hw_params->bds_cnt,
+ TOTAL_DESC);
+
+ return 0;
+}
+
+/* NAPI polling method */
+static int bcmgenet_poll(struct napi_struct *napi, int budget)
+{
+ struct bcmgenet_priv *priv = container_of(napi,
+ struct bcmgenet_priv, napi);
+ unsigned int work_done;
+
+ /* tx reclaim */
+ bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+
+ work_done = bcmgenet_desc_rx(priv, budget);
+
+ /* Advance our consumer index */
+ priv->rx_c_index += work_done;
+ priv->rx_c_index &= DMA_C_INDEX_MASK;
+ bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
+ priv->rx_c_index, RDMA_CONS_INDEX);
+ if (work_done < budget) {
+ napi_complete(napi);
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR);
+ }
+
+ return work_done;
+}
+
+/* Interrupt bottom half */
+static void bcmgenet_irq_task(struct work_struct *work)
+{
+ struct bcmgenet_priv *priv = container_of(
+ work, struct bcmgenet_priv, bcmgenet_irq_work);
+
+ netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
+
+ /* Link UP/DOWN event */
+ if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+ (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
+ phy_mac_interrupt(priv->phydev,
+ priv->irq0_stat & UMAC_IRQ_LINK_UP);
+ priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
+ }
+}
+
+/* bcmgenet_isr1: interrupt handler for ring buffer. */
+static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
+{
+ struct bcmgenet_priv *priv = dev_id;
+ unsigned int index;
+
+ /* Save irq status for bottom-half processing. */
+ priv->irq1_stat =
+ bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
+ ~priv->int1_mask;
+ /* clear interrupts */
+ bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
+
+ netif_dbg(priv, intr, priv->dev,
+ "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+ /* Check the MBDONE interrupts.
+ * packet is done, reclaim descriptors
+ */
+ if (priv->irq1_stat & 0x0000ffff) {
+ index = 0;
+ for (index = 0; index < 16; index++) {
+ if (priv->irq1_stat & (1 << index))
+ bcmgenet_tx_reclaim(priv->dev,
+ &priv->tx_rings[index]);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+/* bcmgenet_isr0: Handle various interrupts. */
+static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
+{
+ struct bcmgenet_priv *priv = dev_id;
+
+ /* Save irq status for bottom-half processing. */
+ priv->irq0_stat =
+ bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
+ ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
+ /* clear interrupts */
+ bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
+
+ netif_dbg(priv, intr, priv->dev,
+ "IRQ=0x%x\n", priv->irq0_stat);
+
+ if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
+ /* We use NAPI (software interrupt throttling) if
+ * Rx Descriptor throttling is not used.
+ * Disable the interrupt; it will be re-enabled in the poll method.
+ */
+ if (likely(napi_schedule_prep(&priv->napi))) {
+ bcmgenet_intrl2_0_writel(priv,
+ UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET);
+ __napi_schedule(&priv->napi);
+ }
+ }
+ if (priv->irq0_stat &
+ (UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE)) {
+ /* Tx reclaim */
+ bcmgenet_tx_reclaim(priv->dev, &priv->tx_rings[DESC_INDEX]);
+ }
+ if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
+ UMAC_IRQ_PHY_DET_F |
+ UMAC_IRQ_LINK_UP |
+ UMAC_IRQ_LINK_DOWN |
+ UMAC_IRQ_HFB_SM |
+ UMAC_IRQ_HFB_MM |
+ UMAC_IRQ_MPD_R)) {
+ /* all other interrupts we are interested in are handled in the bottom half */
+ schedule_work(&priv->bcmgenet_irq_work);
+ }
+
+ if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
+ priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+ priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
+ wake_up(&priv->wq);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
+{
+ u32 reg;
+
+ reg = bcmgenet_rbuf_ctrl_get(priv);
+ reg |= BIT(1);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+
+ reg &= ~BIT(1);
+ bcmgenet_rbuf_ctrl_set(priv, reg);
+ udelay(10);
+}
+
+static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
+ unsigned char *addr)
+{
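+ /* UMAC_MAC0 holds the four most significant bytes of the address,
+ * UMAC_MAC1 the remaining two.
+ */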
+ bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
+ (addr[2] << 8) | addr[3], UMAC_MAC0);
+ bcmgenet_umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+}
+
+static int bcmgenet_wol_resume(struct bcmgenet_priv *priv)
+{
+ int ret;
+
+ /* From WOL-enabled suspend, switch to regular clock */
+ clk_disable(priv->clk_wol);
+ /* init umac registers to synchronize s/w with h/w */
+ ret = init_umac(priv);
+ if (ret)
+ return ret;
+
+ phy_init_hw(priv->phydev);
+ /* Speed settings must be restored */
+ bcmgenet_mii_config(priv->dev);
+
+ return 0;
+}
+
+/* Returns a reusable dma control register value */
+static u32 bcmgenet_dma_disable(struct bcmgenet_priv *priv)
+{
+ u32 reg;
+ u32 dma_ctrl;
+
+ /* disable DMA */
+ dma_ctrl = 1 << (DESC_INDEX + DMA_RING_BUF_EN_SHIFT) | DMA_EN;
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~dma_ctrl;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ bcmgenet_umac_writel(priv, 1, UMAC_TX_FLUSH);
+ udelay(10);
+ bcmgenet_umac_writel(priv, 0, UMAC_TX_FLUSH);
+
+ return dma_ctrl;
+}
+
+static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
+{
+ u32 reg;
+
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg |= dma_ctrl;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg |= dma_ctrl;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+}
+
+static int bcmgenet_open(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ unsigned long dma_ctrl;
+ u32 reg;
+ int ret;
+
+ netif_dbg(priv, ifup, dev, "bcmgenet_open\n");
+
+ /* Turn on the clock */
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
+ /* take MAC out of reset */
+ bcmgenet_umac_reset(priv);
+
+ ret = init_umac(priv);
+ if (ret)
+ goto err_clk_disable;
+
+ /* disable ethernet MAC while updating its registers */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~(CMD_TX_EN | CMD_RX_EN);
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ bcmgenet_set_hw_addr(priv, dev->dev_addr);
+
+ if (priv->wol_enabled) {
+ ret = bcmgenet_wol_resume(priv);
+ if (ret)
+ return ret;
+ }
+
+ if (phy_is_internal(priv->phydev)) {
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+ reg |= EXT_ENERGY_DET_MASK;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ }
+
+ /* Disable RX/TX DMA and flush TX queues */
+ dma_ctrl = bcmgenet_dma_disable(priv);
+
+ /* Reinitialize TDMA and RDMA and SW housekeeping */
+ ret = bcmgenet_init_dma(priv);
+ if (ret) {
+ netdev_err(dev, "failed to initialize DMA\n");
+ goto err_fini_dma;
+ }
+
+ /* Always enable ring 16 - descriptor ring */
+ bcmgenet_enable_dma(priv, dma_ctrl);
+
+ ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
+ dev->name, priv);
+ if (ret < 0) {
+ netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
+ goto err_fini_dma;
+ }
+
+ ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
+ dev->name, priv);
+ if (ret < 0) {
+ netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
+ goto err_irq0;
+ }
+
+ /* Start the network engine */
+ napi_enable(&priv->napi);
+
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg |= (CMD_TX_EN | CMD_RX_EN);
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ /* Make sure we reflect the value of CRC_CMD_FWD */
+ priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
+
+ device_set_wakeup_capable(&dev->dev, 1);
+
+ if (phy_is_internal(priv->phydev))
+ bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+ netif_tx_start_all_queues(dev);
+
+ phy_start(priv->phydev);
+
+ return 0;
+
+err_irq0:
+ free_irq(priv->irq0, dev);
+err_fini_dma:
+ bcmgenet_fini_dma(priv);
+err_clk_disable:
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+ return ret;
+}
+
+static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
+{
+ int ret = 0;
+ int timeout = 0;
+ u32 reg;
+
+ /* Disable TDMA to stop adding more frames to the TX DMA */
+ reg = bcmgenet_tdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
+
+ /* Check TDMA status register to confirm TDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_tdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout == DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev,
+ "Timed out while disabling TX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ /* Wait 10ms for packet drain in both tx and rx dma */
+ usleep_range(10000, 20000);
+
+ /* Disable RDMA */
+ reg = bcmgenet_rdma_readl(priv, DMA_CTRL);
+ reg &= ~DMA_EN;
+ bcmgenet_rdma_writel(priv, reg, DMA_CTRL);
+
+ timeout = 0;
+ /* Check RDMA status register to confirm RDMA is disabled */
+ while (timeout++ < DMA_TIMEOUT_VAL) {
+ reg = bcmgenet_rdma_readl(priv, DMA_STATUS);
+ if (reg & DMA_DISABLED)
+ break;
+
+ udelay(1);
+ }
+
+ if (timeout == DMA_TIMEOUT_VAL) {
+ netdev_warn(priv->dev,
+ "Timed out while disabling RX DMA\n");
+ ret = -ETIMEDOUT;
+ }
+
+ return ret;
+}
+
+static int bcmgenet_close(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret;
+ u32 reg;
+
+ netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
+
+ phy_stop(priv->phydev);
+
+ /* Disable MAC receive */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_RX_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ netif_tx_stop_all_queues(dev);
+
+ ret = bcmgenet_dma_teardown(priv);
+ if (ret)
+ return ret;
+
+ /* Disable MAC transmit. TX DMA must be disabled before this */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~CMD_TX_EN;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+ napi_disable(&priv->napi);
+
+ /* tx reclaim */
+ bcmgenet_tx_reclaim_all(dev);
+ bcmgenet_fini_dma(priv);
+
+ free_irq(priv->irq0, priv);
+ free_irq(priv->irq1, priv);
+
+ /* Wait for pending work items to complete - we are stopping
+ * the clock now. Since interrupts are disabled, no new work
+ * will be scheduled.
+ */
+ cancel_work_sync(&priv->bcmgenet_irq_work);
+
+ if (phy_is_internal(priv->phydev))
+ bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
+
+ if (priv->wol_enabled)
+ clk_enable(priv->clk_wol);
+
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
+ return 0;
+}
+
+static void bcmgenet_timeout(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ netif_dbg(priv, tx_err, dev, "bcmgenet_timeout\n");
+
+ dev->trans_start = jiffies;
+
+ dev->stats.tx_errors++;
+
+ netif_tx_wake_all_queues(dev);
+}
+
+#define MAX_MC_COUNT 16
+
+static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
+ unsigned char *addr,
+ int *i,
+ int *mc)
+{
+ u32 reg;
+
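+ /* Each filter entry occupies two MDF_ADDR words: the first carries the
+ * two most significant address bytes, the second the remaining four.
+ * The matching enable bit in UMAC_MDF_CTRL is set from the top bit
+ * (MAX_MC_COUNT) downwards as entries are added.
+ */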
+ bcmgenet_umac_writel(priv,
+ addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4));
+ bcmgenet_umac_writel(priv,
+ addr[2] << 24 | addr[3] << 16 |
+ addr[4] << 8 | addr[5],
+ UMAC_MDF_ADDR + ((*i + 1) * 4));
+ reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
+ reg |= (1 << (MAX_MC_COUNT - *mc));
+ bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
+ *i += 2;
+ (*mc)++;
+}
+
+static void bcmgenet_set_rx_mode(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct netdev_hw_addr *ha;
+ int i, mc;
+ u32 reg;
+
+ netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
+
+ /* Promiscuous mode */
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ if (dev->flags & IFF_PROMISC) {
+ reg |= CMD_PROMISC;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ bcmgenet_umac_writel(priv, 0, UMAC_MDF_CTRL);
+ return;
+ } else {
+ reg &= ~CMD_PROMISC;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ }
+
+ /* UniMac doesn't support ALLMULTI */
+ if (dev->flags & IFF_ALLMULTI) {
+ netdev_warn(dev, "ALLMULTI is not supported\n");
+ return;
+ }
+
+ /* update MDF filter */
+ i = 0;
+ mc = 0;
+ /* Broadcast */
+ bcmgenet_set_mdf_addr(priv, dev->broadcast, &i, &mc);
+ /* our own address */
+ bcmgenet_set_mdf_addr(priv, dev->dev_addr, &i, &mc);
+ /* Unicast list */
+ if (netdev_uc_count(dev) > (MAX_MC_COUNT - mc))
+ return;
+
+ if (!netdev_uc_empty(dev))
+ netdev_for_each_uc_addr(ha, dev)
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+ /* Multicast */
+ if (netdev_mc_empty(dev) || netdev_mc_count(dev) >= (MAX_MC_COUNT - mc))
+ return;
+
+ netdev_for_each_mc_addr(ha, dev)
+ bcmgenet_set_mdf_addr(priv, ha->addr, &i, &mc);
+}
+
+/* Set the hardware MAC address. */
+static int bcmgenet_set_mac_addr(struct net_device *dev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ /* Setting the MAC address at the hardware level is not possible
+ * without disabling the UniMAC RX/TX enable bits.
+ */
+ if (netif_running(dev))
+ return -EBUSY;
+
+ ether_addr_copy(dev->dev_addr, addr->sa_data);
+
+ return 0;
+}
+
+static const struct net_device_ops bcmgenet_netdev_ops = {
+ .ndo_open = bcmgenet_open,
+ .ndo_stop = bcmgenet_close,
+ .ndo_start_xmit = bcmgenet_xmit,
+ .ndo_tx_timeout = bcmgenet_timeout,
+ .ndo_set_rx_mode = bcmgenet_set_rx_mode,
+ .ndo_set_mac_address = bcmgenet_set_mac_addr,
+ .ndo_do_ioctl = bcmgenet_ioctl,
+ .ndo_set_features = bcmgenet_set_features,
+};
+
+/* Array of GENET hardware parameters/characteristics */
+static struct bcmgenet_hw_params bcmgenet_hw_params[] = {
+ [GENET_V1] = {
+ .tx_queues = 0,
+ .rx_queues = 0,
+ .bds_cnt = 0,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .qtag_mask = 0x1F,
+ .hfb_offset = 0x1000,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x3000,
+ .words_per_bd = 2,
+ },
+ [GENET_V2] = {
+ .tx_queues = 4,
+ .rx_queues = 4,
+ .bds_cnt = 32,
+ .bp_in_en_shift = 16,
+ .bp_in_mask = 0xffff,
+ .hfb_filter_cnt = 16,
+ .qtag_mask = 0x1F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x1000,
+ .hfb_reg_offset = 0x2000,
+ .rdma_offset = 0x3000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 2,
+ .flags = GENET_HAS_EXT,
+ },
+ [GENET_V3] = {
+ .tx_queues = 4,
+ .rx_queues = 4,
+ .bds_cnt = 32,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x10000,
+ .tdma_offset = 0x11000,
+ .words_per_bd = 2,
+ .flags = GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+ },
+ [GENET_V4] = {
+ .tx_queues = 4,
+ .rx_queues = 4,
+ .bds_cnt = 32,
+ .bp_in_en_shift = 17,
+ .bp_in_mask = 0x1ffff,
+ .hfb_filter_cnt = 48,
+ .qtag_mask = 0x3F,
+ .tbuf_offset = 0x0600,
+ .hfb_offset = 0x8000,
+ .hfb_reg_offset = 0xfc00,
+ .rdma_offset = 0x2000,
+ .tdma_offset = 0x4000,
+ .words_per_bd = 3,
+ .flags = GENET_HAS_40BITS | GENET_HAS_EXT | GENET_HAS_MDIO_INTR,
+ },
+};
+
+/* Infer hardware parameters from the detected GENET version */
+static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
+{
+ struct bcmgenet_hw_params *params;
+ u32 reg;
+ u8 major;
+
+ if (GENET_IS_V4(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v4;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
+ priv->version = GENET_V4;
+ } else if (GENET_IS_V3(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v3plus;
+ genet_dma_ring_regs = genet_dma_ring_regs_v123;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V3PLUS;
+ priv->version = GENET_V3;
+ } else if (GENET_IS_V2(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v2;
+ genet_dma_ring_regs = genet_dma_ring_regs_v123;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
+ priv->version = GENET_V2;
+ } else if (GENET_IS_V1(priv)) {
+ bcmgenet_dma_regs = bcmgenet_dma_regs_v1;
+ genet_dma_ring_regs = genet_dma_ring_regs_v123;
+ priv->dma_rx_chk_bit = DMA_RX_CHK_V12;
+ priv->version = GENET_V1;
+ }
+
+ /* enum genet_version starts at 1 */
+ priv->hw_params = &bcmgenet_hw_params[priv->version];
+ params = priv->hw_params;
+
+ /* Read GENET HW version */
+ reg = bcmgenet_sys_readl(priv, SYS_REV_CTRL);
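+ /* The major revision is in bits 27:24 of SYS_REV_CTRL; raw values 5
+ * and 0 are remapped below so they line up with the GENET_V4/GENET_V1
+ * enum values used for the mismatch check.
+ */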
+ major = (reg >> 24 & 0x0f);
+ if (major == 5)
+ major = 4;
+ else if (major == 0)
+ major = 1;
+ if (major != priv->version) {
+ dev_err(&priv->pdev->dev,
+ "GENET version mismatch, got: %d, configured for: %d\n",
+ major, priv->version);
+ }
+
+ /* Print the GENET core version */
+ dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
+ major, (reg >> 16) & 0x0f, reg & 0xffff);
+
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+ if (!(params->flags & GENET_HAS_40BITS))
+ pr_warn("GENET does not support 40-bits PA\n");
+#endif
+
+ pr_debug("Configuration for version: %d\n"
+ "TXq: %1d, RXq: %1d, BDs: %1d\n"
+ "BP << en: %2d, BP msk: 0x%05x\n"
+ "HFB count: %2d, QTAQ msk: 0x%05x\n"
+ "TBUF: 0x%04x, HFB: 0x%04x, HFBreg: 0x%04x\n"
+ "RDMA: 0x%05x, TDMA: 0x%05x\n"
+ "Words/BD: %d\n",
+ priv->version,
+ params->tx_queues, params->rx_queues, params->bds_cnt,
+ params->bp_in_en_shift, params->bp_in_mask,
+ params->hfb_filter_cnt, params->qtag_mask,
+ params->tbuf_offset, params->hfb_offset,
+ params->hfb_reg_offset,
+ params->rdma_offset, params->tdma_offset,
+ params->words_per_bd);
+}
+
+static const struct of_device_id bcmgenet_match[] = {
+ { .compatible = "brcm,genet-v1", .data = (void *)GENET_V1 },
+ { .compatible = "brcm,genet-v2", .data = (void *)GENET_V2 },
+ { .compatible = "brcm,genet-v3", .data = (void *)GENET_V3 },
+ { .compatible = "brcm,genet-v4", .data = (void *)GENET_V4 },
+ { },
+};
+
+static int bcmgenet_probe(struct platform_device *pdev)
+{
+ struct device_node *dn = pdev->dev.of_node;
+ const struct of_device_id *of_id;
+ struct bcmgenet_priv *priv;
+ struct net_device *dev;
+ const void *macaddr;
+ struct resource *r;
+ int err = -EIO;
+
+ /* Up to GENET_MAX_MQ_CNT + 1 TX queues and a single RX queue */
+ dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1, 1);
+ if (!dev) {
+ dev_err(&pdev->dev, "can't allocate net device\n");
+ return -ENOMEM;
+ }
+
+ of_id = of_match_node(bcmgenet_match, dn);
+ if (!of_id)
+ return -EINVAL;
+
+ priv = netdev_priv(dev);
+ priv->irq0 = platform_get_irq(pdev, 0);
+ priv->irq1 = platform_get_irq(pdev, 1);
+ if (!priv->irq0 || !priv->irq1) {
+ dev_err(&pdev->dev, "can't find IRQs\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ macaddr = of_get_mac_address(dn);
+ if (!macaddr) {
+ dev_err(&pdev->dev, "can't find MAC address\n");
+ err = -EINVAL;
+ goto err;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ priv->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(priv->base)) {
+ err = PTR_ERR(priv->base);
+ goto err;
+ }
+
+ SET_NETDEV_DEV(dev, &pdev->dev);
+ dev_set_drvdata(&pdev->dev, dev);
+ ether_addr_copy(dev->dev_addr, macaddr);
+ dev->watchdog_timeo = 2 * HZ;
+ SET_ETHTOOL_OPS(dev, &bcmgenet_ethtool_ops);
+ dev->netdev_ops = &bcmgenet_netdev_ops;
+ netif_napi_add(dev, &priv->napi, bcmgenet_poll, 64);
+
+ priv->msg_enable = netif_msg_init(-1, GENET_MSG_DEFAULT);
+
+ /* Set hardware features */
+ dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
+ NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
+
+ /* Set the needed headroom to account for any possible
+ * features being enabled/disabled at runtime
+ */
+ dev->needed_headroom += 64;
+
+ netdev_boot_setup_check(dev);
+
+ priv->dev = dev;
+ priv->pdev = pdev;
+ priv->version = (enum bcmgenet_version)of_id->data;
+
+ bcmgenet_set_hw_params(priv);
+
+ spin_lock_init(&priv->lock);
+ /* Mii wait queue */
+ init_waitqueue_head(&priv->wq);
+ /* Always use RX_BUF_LENGTH (2KB) buffer for all chips */
+ priv->rx_buf_len = RX_BUF_LENGTH;
+ INIT_WORK(&priv->bcmgenet_irq_work, bcmgenet_irq_task);
+
+ priv->clk = devm_clk_get(&priv->pdev->dev, "enet");
+ if (IS_ERR(priv->clk))
+ dev_warn(&priv->pdev->dev, "failed to get enet clock\n");
+
+ priv->clk_wol = devm_clk_get(&priv->pdev->dev, "enet-wol");
+ if (IS_ERR(priv->clk_wol))
+ dev_warn(&priv->pdev->dev, "failed to get enet-wol clock\n");
+
+ if (!IS_ERR(priv->clk))
+ clk_prepare_enable(priv->clk);
+
+ err = reset_umac(priv);
+ if (err)
+ goto err_clk_disable;
+
+ err = bcmgenet_mii_init(dev);
+ if (err)
+ goto err_clk_disable;
+
+ /* Set up the number of real queues + 1 (GENET_V1 has 0 hardware
+ * queues, just the ring 16 descriptor-based TX queue).
+ */
+ netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
+ netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
+
+ err = register_netdev(dev);
+ if (err)
+ goto err_clk_disable;
+
+ /* Turn off the main clock, WOL clock is handled separately */
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+
+ return err;
+
+err_clk_disable:
+ if (!IS_ERR(priv->clk))
+ clk_disable_unprepare(priv->clk);
+err:
+ free_netdev(dev);
+ return err;
+}
+
+static int bcmgenet_remove(struct platform_device *pdev)
+{
+ struct bcmgenet_priv *priv = dev_to_priv(&pdev->dev);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+ unregister_netdev(priv->dev);
+ bcmgenet_mii_exit(priv->dev);
+ free_netdev(priv->dev);
+
+ return 0;
+}
+
+
+static struct platform_driver bcmgenet_driver = {
+ .probe = bcmgenet_probe,
+ .remove = bcmgenet_remove,
+ .driver = {
+ .name = "bcmgenet",
+ .owner = THIS_MODULE,
+ .of_match_table = bcmgenet_match,
+ },
+};
+module_platform_driver(bcmgenet_driver);
+
+MODULE_AUTHOR("Broadcom Corporation");
+MODULE_DESCRIPTION("Broadcom GENET Ethernet controller driver");
+MODULE_ALIAS("platform:bcmgenet");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
new file mode 100644
index 0000000..a6758ad
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -0,0 +1,629 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ *
+*/
+#ifndef __BCMGENET_H__
+#define __BCMGENET_H__
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/mii.h>
+#include <linux/if_vlan.h>
+#include <linux/phy.h>
+
+/* total number of Buffer Descriptors, same for Rx/Tx */
+#define TOTAL_DESC 256
+
+/* which ring is descriptor based */
+#define DESC_INDEX 16
+
+/* Body(1500) + EH_SIZE(14) + VLANTAG(4) + BRCMTAG(6) + FCS(4) = 1528.
+ * Adding ENET_PAD(8) gives 1536, which is a multiple of 256 bytes.
+ */
+#define ENET_BRCM_TAG_LEN 6
+#define ENET_PAD 8
+#define ENET_MAX_MTU_SIZE (ETH_DATA_LEN + ETH_HLEN + VLAN_HLEN + \
+ ENET_BRCM_TAG_LEN + ETH_FCS_LEN + ENET_PAD)
+#define DMA_MAX_BURST_LENGTH 0x10
+
+/* misc. configuration */
+#define CLEAR_ALL_HFB 0xFF
+#define DMA_FC_THRESH_HI (TOTAL_DESC >> 4)
+#define DMA_FC_THRESH_LO 5
+
+/* 64B receive/transmit status block */
+struct status_64 {
+ u32 length_status; /* length and peripheral status */
+ u32 ext_status; /* Extended status*/
+ u32 rx_csum; /* partial rx checksum */
+ u32 unused1[9]; /* unused */
+ u32 tx_csum_info; /* Tx checksum info. */
+ u32 unused2[3]; /* unused */
+};
+
+/* Rx status bits */
+#define STATUS_RX_EXT_MASK 0x1FFFFF
+#define STATUS_RX_CSUM_MASK 0xFFFF
+#define STATUS_RX_CSUM_OK 0x10000
+#define STATUS_RX_CSUM_FR 0x20000
+#define STATUS_RX_PROTO_TCP 0
+#define STATUS_RX_PROTO_UDP 1
+#define STATUS_RX_PROTO_ICMP 2
+#define STATUS_RX_PROTO_OTHER 3
+#define STATUS_RX_PROTO_MASK 3
+#define STATUS_RX_PROTO_SHIFT 18
+#define STATUS_FILTER_INDEX_MASK 0xFFFF
+/* Tx status bits */
+#define STATUS_TX_CSUM_START_MASK 0X7FFF
+#define STATUS_TX_CSUM_START_SHIFT 16
+#define STATUS_TX_CSUM_PROTO_UDP 0x8000
+#define STATUS_TX_CSUM_OFFSET_MASK 0x7FFF
+#define STATUS_TX_CSUM_LV 0x80000000
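+/* Field layout implied by the masks above: the checksum start offset
+ * occupies bits 30:16 (STATUS_TX_CSUM_START_*), the checksum write offset
+ * the low 15 bits (STATUS_TX_CSUM_OFFSET_MASK), with STATUS_TX_CSUM_PROTO_UDP
+ * (bit 15) and STATUS_TX_CSUM_LV (bit 31) as flag bits.
+ */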
+
+/* DMA Descriptor */
+#define DMA_DESC_LENGTH_STATUS 0x00 /* in bytes of data in buffer */
+#define DMA_DESC_ADDRESS_LO 0x04 /* lower bits of PA */
+#define DMA_DESC_ADDRESS_HI 0x08 /* upper 32 bits of PA, GENETv4+ */
+
+/* Rx/Tx common counter group */
+struct bcmgenet_pkt_counters {
+ u32 cnt_64; /* RO Received/Transmitted 64 bytes packet */
+ u32 cnt_127; /* RO Rx/Tx 65-127 bytes packet */
+ u32 cnt_255; /* RO Rx/Tx 128-255 bytes packet */
+ u32 cnt_511; /* RO Rx/Tx 256-511 bytes packet */
+ u32 cnt_1023; /* RO Rx/Tx 512-1023 bytes packet */
+ u32 cnt_1518; /* RO Rx/Tx 1024-1518 bytes packet */
+ u32 cnt_mgv; /* RO Rx/Tx 1519-1522 good VLAN packet */
+ u32 cnt_2047; /* RO Rx/Tx 1522-2047 bytes packet */
+ u32 cnt_4095; /* RO Rx/Tx 2048-4095 bytes packet */
+ u32 cnt_9216; /* RO Rx/Tx 4096-9216 bytes packet */
+};
+
+/* RSV, Receive Status Vector */
+struct bcmgenet_rx_counters {
+ struct bcmgenet_pkt_counters pkt_cnt;
+ u32 pkt; /* RO (0x428) Received pkt count*/
+ u32 bytes; /* RO Received byte count */
+ u32 mca; /* RO # of Received multicast pkt */
+ u32 bca; /* RO # of Receive broadcast pkt */
+ u32 fcs; /* RO # of Received FCS error */
+ u32 cf; /* RO # of Received control frame pkt*/
+ u32 pf; /* RO # of Received pause frame pkt */
+ u32 uo; /* RO # of unknown op code pkt */
+ u32 aln; /* RO # of alignment error count */
+ u32 flr; /* RO # of frame length out of range count */
+ u32 cde; /* RO # of code error pkt */
+ u32 fcr; /* RO # of carrier sense error pkt */
+ u32 ovr; /* RO # of oversize pkt*/
+ u32 jbr; /* RO # of jabber count */
+ u32 mtue; /* RO # of MTU error pkt*/
+ u32 pok; /* RO # of Received good pkt */
+ u32 uc; /* RO # of unicast pkt */
+ u32 ppp; /* RO # of PPP pkt */
+ u32 rcrc; /* RO (0x470) # of CRC match pkt */
+};
+
+/* TSV, Transmit Status Vector */
+struct bcmgenet_tx_counters {
+ struct bcmgenet_pkt_counters pkt_cnt;
+ u32 pkts; /* RO (0x4a8) Transmitted pkt */
+ u32 mca; /* RO # of xmited multicast pkt */
+ u32 bca; /* RO # of xmited broadcast pkt */
+ u32 pf; /* RO # of xmited pause frame count */
+ u32 cf; /* RO # of xmited control frame count */
+ u32 fcs; /* RO # of xmited FCS error count */
+ u32 ovr; /* RO # of xmited oversize pkt */
+ u32 drf; /* RO # of xmited deferral pkt */
+ u32 edf; /* RO # of xmited Excessive deferral pkt*/
+ u32 scl; /* RO # of xmited single collision pkt */
+ u32 mcl; /* RO # of xmited multiple collision pkt*/
+ u32 lcl; /* RO # of xmited late collision pkt */
+ u32 ecl; /* RO # of xmited excessive collision pkt*/
+ u32 frg; /* RO # of xmited fragments pkt*/
+ u32 ncl; /* RO # of xmited total collision count */
+ u32 jbr; /* RO # of xmited jabber count*/
+ u32 bytes; /* RO # of xmited byte count */
+ u32 pok; /* RO # of xmited good pkt */
+ u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
+};
+
+struct bcmgenet_mib_counters {
+ struct bcmgenet_rx_counters rx;
+ struct bcmgenet_tx_counters tx;
+ u32 rx_runt_cnt;
+ u32 rx_runt_fcs;
+ u32 rx_runt_fcs_align;
+ u32 rx_runt_bytes;
+ u32 rbuf_ovflow_cnt;
+ u32 rbuf_err_cnt;
+ u32 mdf_err_cnt;
+};
+
+#define UMAC_HD_BKP_CTRL 0x004
+#define HD_FC_EN (1 << 0)
+#define HD_FC_BKOFF_OK (1 << 1)
+#define IPG_CONFIG_RX_SHIFT 2
+#define IPG_CONFIG_RX_MASK 0x1F
+
+#define UMAC_CMD 0x008
+#define CMD_TX_EN (1 << 0)
+#define CMD_RX_EN (1 << 1)
+#define UMAC_SPEED_10 0
+#define UMAC_SPEED_100 1
+#define UMAC_SPEED_1000 2
+#define UMAC_SPEED_2500 3
+#define CMD_SPEED_SHIFT 2
+#define CMD_SPEED_MASK 3
+#define CMD_PROMISC (1 << 4)
+#define CMD_PAD_EN (1 << 5)
+#define CMD_CRC_FWD (1 << 6)
+#define CMD_PAUSE_FWD (1 << 7)
+#define CMD_RX_PAUSE_IGNORE (1 << 8)
+#define CMD_TX_ADDR_INS (1 << 9)
+#define CMD_HD_EN (1 << 10)
+#define CMD_SW_RESET (1 << 13)
+#define CMD_LCL_LOOP_EN (1 << 15)
+#define CMD_AUTO_CONFIG (1 << 22)
+#define CMD_CNTL_FRM_EN (1 << 23)
+#define CMD_NO_LEN_CHK (1 << 24)
+#define CMD_RMT_LOOP_EN (1 << 25)
+#define CMD_PRBL_EN (1 << 27)
+#define CMD_TX_PAUSE_IGNORE (1 << 28)
+#define CMD_TX_RX_EN (1 << 29)
+#define CMD_RUNT_FILTER_DIS (1 << 30)
+
+#define UMAC_MAC0 0x00C
+#define UMAC_MAC1 0x010
+#define UMAC_MAX_FRAME_LEN 0x014
+
+#define UMAC_TX_FLUSH 0x334
+
+#define UMAC_MIB_START 0x400
+
+#define UMAC_MDIO_CMD 0x614
+#define MDIO_START_BUSY (1 << 29)
+#define MDIO_READ_FAIL (1 << 28)
+#define MDIO_RD (2 << 26)
+#define MDIO_WR (1 << 26)
+#define MDIO_PMD_SHIFT 21
+#define MDIO_PMD_MASK 0x1F
+#define MDIO_REG_SHIFT 16
+#define MDIO_REG_MASK 0x1F
+
+#define UMAC_RBUF_OVFL_CNT 0x61C
+
+#define UMAC_MPD_CTRL 0x620
+#define MPD_EN (1 << 0)
+#define MPD_PW_EN (1 << 27)
+#define MPD_MSEQ_LEN_SHIFT 16
+#define MPD_MSEQ_LEN_MASK 0xFF
+
+#define UMAC_MPD_PW_MS 0x624
+#define UMAC_MPD_PW_LS 0x628
+#define UMAC_RBUF_ERR_CNT 0x634
+#define UMAC_MDF_ERR_CNT 0x638
+#define UMAC_MDF_CTRL 0x650
+#define UMAC_MDF_ADDR 0x654
+#define UMAC_MIB_CTRL 0x580
+#define MIB_RESET_RX (1 << 0)
+#define MIB_RESET_RUNT (1 << 1)
+#define MIB_RESET_TX (1 << 2)
+
+#define RBUF_CTRL 0x00
+#define RBUF_64B_EN (1 << 0)
+#define RBUF_ALIGN_2B (1 << 1)
+#define RBUF_BAD_DIS (1 << 2)
+
+#define RBUF_STATUS 0x0C
+#define RBUF_STATUS_WOL (1 << 0)
+#define RBUF_STATUS_MPD_INTR_ACTIVE (1 << 1)
+#define RBUF_STATUS_ACPI_INTR_ACTIVE (1 << 2)
+
+#define RBUF_CHK_CTRL 0x14
+#define RBUF_RXCHK_EN (1 << 0)
+#define RBUF_SKIP_FCS (1 << 4)
+
+#define RBUF_TBUF_SIZE_CTRL 0xb4
+
+#define RBUF_HFB_CTRL_V1 0x38
+#define RBUF_HFB_FILTER_EN_SHIFT 16
+#define RBUF_HFB_FILTER_EN_MASK 0xffff0000
+#define RBUF_HFB_EN (1 << 0)
+#define RBUF_HFB_256B (1 << 1)
+#define RBUF_ACPI_EN (1 << 2)
+
+#define RBUF_HFB_LEN_V1 0x3C
+#define RBUF_FLTR_LEN_MASK 0xFF
+#define RBUF_FLTR_LEN_SHIFT 8
+
+#define TBUF_CTRL 0x00
+#define TBUF_BP_MC 0x0C
+
+#define TBUF_CTRL_V1 0x80
+#define TBUF_BP_MC_V1 0xA0
+
+#define HFB_CTRL 0x00
+#define HFB_FLT_ENABLE_V3PLUS 0x04
+#define HFB_FLT_LEN_V2 0x04
+#define HFB_FLT_LEN_V3PLUS 0x1C
+
+/* uniMac intrl2 registers */
+#define INTRL2_CPU_STAT 0x00
+#define INTRL2_CPU_SET 0x04
+#define INTRL2_CPU_CLEAR 0x08
+#define INTRL2_CPU_MASK_STATUS 0x0C
+#define INTRL2_CPU_MASK_SET 0x10
+#define INTRL2_CPU_MASK_CLEAR 0x14
+
+/* INTRL2 instance 0 definitions */
+#define UMAC_IRQ_SCB (1 << 0)
+#define UMAC_IRQ_EPHY (1 << 1)
+#define UMAC_IRQ_PHY_DET_R (1 << 2)
+#define UMAC_IRQ_PHY_DET_F (1 << 3)
+#define UMAC_IRQ_LINK_UP (1 << 4)
+#define UMAC_IRQ_LINK_DOWN (1 << 5)
+#define UMAC_IRQ_UMAC (1 << 6)
+#define UMAC_IRQ_UMAC_TSV (1 << 7)
+#define UMAC_IRQ_TBUF_UNDERRUN (1 << 8)
+#define UMAC_IRQ_RBUF_OVERFLOW (1 << 9)
+#define UMAC_IRQ_HFB_SM (1 << 10)
+#define UMAC_IRQ_HFB_MM (1 << 11)
+#define UMAC_IRQ_MPD_R (1 << 12)
+#define UMAC_IRQ_RXDMA_MBDONE (1 << 13)
+#define UMAC_IRQ_RXDMA_PDONE (1 << 14)
+#define UMAC_IRQ_RXDMA_BDONE (1 << 15)
+#define UMAC_IRQ_TXDMA_MBDONE (1 << 16)
+#define UMAC_IRQ_TXDMA_PDONE (1 << 17)
+#define UMAC_IRQ_TXDMA_BDONE (1 << 18)
+/* Only valid for GENETv3+ */
+#define UMAC_IRQ_MDIO_DONE (1 << 23)
+#define UMAC_IRQ_MDIO_ERROR (1 << 24)
+
+/* Register block offsets */
+#define GENET_SYS_OFF 0x0000
+#define GENET_GR_BRIDGE_OFF 0x0040
+#define GENET_EXT_OFF 0x0080
+#define GENET_INTRL2_0_OFF 0x0200
+#define GENET_INTRL2_1_OFF 0x0240
+#define GENET_RBUF_OFF 0x0300
+#define GENET_UMAC_OFF 0x0800
+
+/* SYS block offsets and register definitions */
+#define SYS_REV_CTRL 0x00
+#define SYS_PORT_CTRL 0x04
+#define PORT_MODE_INT_EPHY 0
+#define PORT_MODE_INT_GPHY 1
+#define PORT_MODE_EXT_EPHY 2
+#define PORT_MODE_EXT_GPHY 3
+#define PORT_MODE_EXT_RVMII_25 (4 | BIT(4))
+#define PORT_MODE_EXT_RVMII_50 4
+#define LED_ACT_SOURCE_MAC (1 << 9)
+
+#define SYS_RBUF_FLUSH_CTRL 0x08
+#define SYS_TBUF_FLUSH_CTRL 0x0C
+#define RBUF_FLUSH_CTRL_V1 0x04
+
+/* Ext block register offsets and definitions */
+#define EXT_EXT_PWR_MGMT 0x00
+#define EXT_PWR_DOWN_BIAS (1 << 0)
+#define EXT_PWR_DOWN_DLL (1 << 1)
+#define EXT_PWR_DOWN_PHY (1 << 2)
+#define EXT_PWR_DN_EN_LD (1 << 3)
+#define EXT_ENERGY_DET (1 << 4)
+#define EXT_IDDQ_FROM_PHY (1 << 5)
+#define EXT_PHY_RESET (1 << 8)
+#define EXT_ENERGY_DET_MASK (1 << 12)
+
+#define EXT_RGMII_OOB_CTRL 0x0C
+#define RGMII_MODE_EN (1 << 0)
+#define RGMII_LINK (1 << 4)
+#define OOB_DISABLE (1 << 5)
+#define ID_MODE_DIS (1 << 16)
+
+#define EXT_GPHY_CTRL 0x1C
+#define EXT_CFG_IDDQ_BIAS (1 << 0)
+#define EXT_CFG_PWR_DOWN (1 << 1)
+#define EXT_GPHY_RESET (1 << 5)
+
+/* DMA rings size */
+#define DMA_RING_SIZE (0x40)
+#define DMA_RINGS_SIZE (DMA_RING_SIZE * (DESC_INDEX + 1))
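+/* DESC_INDEX + 1 == 17 per-ring register blocks (rings 0-15 plus the
+ * default descriptor ring 16), so DMA_RINGS_SIZE == 0x440 bytes.
+ */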
+
+/* DMA registers common definitions */
+#define DMA_RW_POINTER_MASK 0x1FF
+#define DMA_P_INDEX_DISCARD_CNT_MASK 0xFFFF
+#define DMA_P_INDEX_DISCARD_CNT_SHIFT 16
+#define DMA_BUFFER_DONE_CNT_MASK 0xFFFF
+#define DMA_BUFFER_DONE_CNT_SHIFT 16
+#define DMA_P_INDEX_MASK 0xFFFF
+#define DMA_C_INDEX_MASK 0xFFFF
+
+/* DMA ring size register */
+#define DMA_RING_SIZE_MASK 0xFFFF
+#define DMA_RING_SIZE_SHIFT 16
+#define DMA_RING_BUFFER_SIZE_MASK 0xFFFF
+
+/* DMA interrupt threshold register */
+#define DMA_INTR_THRESHOLD_MASK 0x00FF
+
+/* DMA XON/XOFF register */
+#define DMA_XON_THREHOLD_MASK 0xFFFF
+#define DMA_XOFF_THRESHOLD_MASK 0xFFFF
+#define DMA_XOFF_THRESHOLD_SHIFT 16
+
+/* DMA flow period register */
+#define DMA_FLOW_PERIOD_MASK 0xFFFF
+#define DMA_MAX_PKT_SIZE_MASK 0xFFFF
+#define DMA_MAX_PKT_SIZE_SHIFT 16
+
+
+/* DMA control register */
+#define DMA_EN (1 << 0)
+#define DMA_RING_BUF_EN_SHIFT 0x01
+#define DMA_RING_BUF_EN_MASK 0xFFFF
+#define DMA_TSB_SWAP_EN (1 << 20)
+
+/* DMA status register */
+#define DMA_DISABLED (1 << 0)
+#define DMA_DESC_RAM_INIT_BUSY (1 << 1)
+
+/* DMA SCB burst size register */
+#define DMA_SCB_BURST_SIZE_MASK 0x1F
+
+/* DMA activity vector register */
+#define DMA_ACTIVITY_VECTOR_MASK 0x1FFFF
+
+/* DMA backpressure mask register */
+#define DMA_BACKPRESSURE_MASK 0x1FFFF
+#define DMA_PFC_ENABLE (1 << 31)
+
+/* DMA backpressure status register */
+#define DMA_BACKPRESSURE_STATUS_MASK 0x1FFFF
+
+/* DMA override register */
+#define DMA_LITTLE_ENDIAN_MODE (1 << 0)
+#define DMA_REGISTER_MODE (1 << 1)
+
+/* DMA timeout register */
+#define DMA_TIMEOUT_MASK 0xFFFF
+#define DMA_TIMEOUT_VAL 5000 /* micro seconds */
+
+/* TDMA rate limiting control register */
+#define DMA_RATE_LIMIT_EN_MASK 0xFFFF
+
+/* TDMA arbitration control register */
+#define DMA_ARBITER_MODE_MASK 0x03
+#define DMA_RING_BUF_PRIORITY_MASK 0x1F
+#define DMA_RING_BUF_PRIORITY_SHIFT 5
+#define DMA_RATE_ADJ_MASK 0xFF
+
+/* Tx/Rx Dma Descriptor common bits*/
+#define DMA_BUFLENGTH_MASK 0x0fff
+#define DMA_BUFLENGTH_SHIFT 16
+#define DMA_OWN 0x8000
+#define DMA_EOP 0x4000
+#define DMA_SOP 0x2000
+#define DMA_WRAP 0x1000
+/* Tx specific Dma descriptor bits */
+#define DMA_TX_UNDERRUN 0x0200
+#define DMA_TX_APPEND_CRC 0x0040
+#define DMA_TX_OW_CRC 0x0020
+#define DMA_TX_DO_CSUM 0x0010
+#define DMA_TX_QTAG_SHIFT 7
+
+/* Rx Specific Dma descriptor bits */
+#define DMA_RX_CHK_V3PLUS 0x8000
+#define DMA_RX_CHK_V12 0x1000
+#define DMA_RX_BRDCAST 0x0040
+#define DMA_RX_MULT 0x0020
+#define DMA_RX_LG 0x0010
+#define DMA_RX_NO 0x0008
+#define DMA_RX_RXER 0x0004
+#define DMA_RX_CRC_ERROR 0x0002
+#define DMA_RX_OV 0x0001
+#define DMA_RX_FI_MASK 0x001F
+#define DMA_RX_FI_SHIFT 0x0007
+#define DMA_DESC_ALLOC_MASK 0x00FF
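+/* Taken together, a descriptor's length_status word carries the buffer
+ * length in bits 27:16 (DMA_BUFLENGTH_*) and control/status flags such as
+ * DMA_OWN, DMA_SOP, DMA_EOP and DMA_WRAP, plus the direction-specific bits
+ * above, in the low 16 bits.
+ */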
+
+#define DMA_ARBITER_RR 0x00
+#define DMA_ARBITER_WRR 0x01
+#define DMA_ARBITER_SP 0x02
+
+struct enet_cb {
+ struct sk_buff *skb;
+ void __iomem *bd_addr;
+ DEFINE_DMA_UNMAP_ADDR(dma_addr);
+ DEFINE_DMA_UNMAP_LEN(dma_len);
+};
+
+/* power management mode */
+enum bcmgenet_power_mode {
+ GENET_POWER_CABLE_SENSE = 0,
+ GENET_POWER_PASSIVE,
+};
+
+struct bcmgenet_priv;
+
+/* We support both runtime GENET detection and compile-time
+ * selection, to optimize code paths for a given hardware
+ */
+enum bcmgenet_version {
+ GENET_V1 = 1,
+ GENET_V2,
+ GENET_V3,
+ GENET_V4
+};
+
+#define GENET_IS_V1(p) ((p)->version == GENET_V1)
+#define GENET_IS_V2(p) ((p)->version == GENET_V2)
+#define GENET_IS_V3(p) ((p)->version == GENET_V3)
+#define GENET_IS_V4(p) ((p)->version == GENET_V4)
+
+/* Hardware flags */
+#define GENET_HAS_40BITS (1 << 0)
+#define GENET_HAS_EXT (1 << 1)
+#define GENET_HAS_MDIO_INTR (1 << 2)
+
+/* BCMGENET hardware parameters, keep this structure nicely aligned
+ * since it is going to be used in hot paths
+ */
+struct bcmgenet_hw_params {
+ u8 tx_queues;
+ u8 rx_queues;
+ u8 bds_cnt;
+ u8 bp_in_en_shift;
+ u32 bp_in_mask;
+ u8 hfb_filter_cnt;
+ u8 qtag_mask;
+ u16 tbuf_offset;
+ u32 hfb_offset;
+ u32 hfb_reg_offset;
+ u32 rdma_offset;
+ u32 tdma_offset;
+ u32 words_per_bd;
+ u32 flags;
+};
+
+struct bcmgenet_tx_ring {
+ spinlock_t lock; /* ring lock */
+ unsigned int index; /* ring index */
+ unsigned int queue; /* queue index */
+ struct enet_cb *cbs; /* tx ring buffer control block*/
+ unsigned int size; /* size of each tx ring */
+ unsigned int c_index; /* last consumer index of each ring*/
+ unsigned int free_bds; /* # of free bds for each ring */
+ unsigned int write_ptr; /* Tx ring write pointer SW copy */
+ unsigned int prod_index; /* Tx ring producer index SW copy */
+ unsigned int cb_ptr; /* Tx ring initial CB ptr */
+ unsigned int end_ptr; /* Tx ring end CB ptr */
+ void (*int_enable)(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *);
+ void (*int_disable)(struct bcmgenet_priv *priv,
+ struct bcmgenet_tx_ring *);
+};
+
+/* device context */
+struct bcmgenet_priv {
+ void __iomem *base;
+ enum bcmgenet_version version;
+ struct net_device *dev;
+ spinlock_t lock;
+ u32 int0_mask;
+ u32 int1_mask;
+
+ /* NAPI for descriptor based rx */
+ struct napi_struct napi ____cacheline_aligned;
+
+ /* transmit variables */
+ void __iomem *tx_bds;
+ struct enet_cb *tx_cbs;
+ unsigned int num_tx_bds;
+
+ struct bcmgenet_tx_ring tx_rings[DESC_INDEX + 1];
+
+ /* receive variables */
+ void __iomem *rx_bds;
+ void __iomem *rx_bd_assign_ptr;
+ int rx_bd_assign_index;
+ struct enet_cb *rx_cbs;
+ unsigned int num_rx_bds;
+ unsigned int rx_buf_len;
+ unsigned int rx_read_ptr;
+ unsigned int rx_c_index;
+
+ /* other misc variables */
+ struct bcmgenet_hw_params *hw_params;
+
+ /* MDIO bus variables */
+ wait_queue_head_t wq;
+ struct phy_device *phydev;
+ struct device_node *phy_dn;
+ struct mii_bus *mii_bus;
+
+ /* PHY device variables */
+ int old_duplex;
+ int old_link;
+ int old_pause;
+ phy_interface_t phy_interface;
+ int phy_addr;
+ int ext_phy;
+
+ /* Interrupt variables */
+ struct work_struct bcmgenet_irq_work;
+ int irq0;
+ int irq1;
+ unsigned int irq0_stat;
+ unsigned int irq1_stat;
+
+ /* HW descriptors/checksum variables */
+ bool desc_64b_en;
+ bool desc_rxchk_en;
+ bool crc_fwd_en;
+
+ unsigned int dma_rx_chk_bit;
+
+ u32 msg_enable;
+
+ struct clk *clk;
+ struct platform_device *pdev;
+
+ /* WOL */
+ unsigned long wol_enabled;
+ struct clk *clk_wol;
+ u32 wolopts;
+
+ struct bcmgenet_mib_counters mib;
+};
+
+#define GENET_IO_MACRO(name, offset) \
+static inline u32 bcmgenet_##name##_readl(struct bcmgenet_priv *priv, \
+ u32 off) \
+{ \
+ return __raw_readl(priv->base + offset + off); \
+} \
+static inline void bcmgenet_##name##_writel(struct bcmgenet_priv *priv, \
+ u32 val, u32 off) \
+{ \
+ __raw_writel(val, priv->base + offset + off); \
+}
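+
+/* For reference, GENET_IO_MACRO(umac, GENET_UMAC_OFF) below expands to a
+ * pair of accessors equivalent to:
+ *
+ * static inline u32 bcmgenet_umac_readl(struct bcmgenet_priv *priv, u32 off)
+ * {
+ *	return __raw_readl(priv->base + GENET_UMAC_OFF + off);
+ * }
+ * static inline void bcmgenet_umac_writel(struct bcmgenet_priv *priv,
+ *					   u32 val, u32 off)
+ * {
+ *	__raw_writel(val, priv->base + GENET_UMAC_OFF + off);
+ * }
+ *
+ * The hfb/hfb_reg instances pass a priv-relative offset, which works because
+ * "offset" is substituted inside functions that already have priv in scope.
+ */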
+
+GENET_IO_MACRO(ext, GENET_EXT_OFF);
+GENET_IO_MACRO(umac, GENET_UMAC_OFF);
+GENET_IO_MACRO(sys, GENET_SYS_OFF);
+
+/* interrupt l2 registers accessors */
+GENET_IO_MACRO(intrl2_0, GENET_INTRL2_0_OFF);
+GENET_IO_MACRO(intrl2_1, GENET_INTRL2_1_OFF);
+
+/* HFB register accessors */
+GENET_IO_MACRO(hfb, priv->hw_params->hfb_offset);
+
+/* GENET v2+ HFB control and filter len helpers */
+GENET_IO_MACRO(hfb_reg, priv->hw_params->hfb_reg_offset);
+
+/* RBUF register accessors */
+GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
+
+/* MDIO routines */
+int bcmgenet_mii_init(struct net_device *dev);
+int bcmgenet_mii_config(struct net_device *dev);
+void bcmgenet_mii_exit(struct net_device *dev);
+void bcmgenet_mii_reset(struct net_device *dev);
+
+#endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
new file mode 100644
index 0000000..4608673
--- /dev/null
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -0,0 +1,464 @@
+/*
+ * Broadcom GENET MDIO routines
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/wait.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/bitops.h>
+#include <linux/netdevice.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/phy_fixed.h>
+#include <linux/brcmphy.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+#include <linux/of_mdio.h>
+
+#include "bcmgenet.h"
+
+/* read a value from the MII */
+static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
+{
+ int ret;
+ struct net_device *dev = bus->priv;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
+ (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
+ /* Start MDIO transaction */
+ reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+ reg |= MDIO_START_BUSY;
+ bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
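+ /* Completion is signalled either by the MDIO_DONE/MDIO_ERROR interrupt
+ * waking priv->wq on GENET_HAS_MDIO_INTR parts (see bcmgenet_isr0()),
+ * or simply by the 10ms timeout below on older chips.
+ */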
+ wait_event_timeout(priv->wq,
+ !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
+ & MDIO_START_BUSY),
+ HZ / 100);
+ ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+
+ if (ret & MDIO_READ_FAIL)
+ return -EIO;
+
+ return ret & 0xffff;
+}
+
+/* write a value to the MII */
+static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
+ int location, u16 val)
+{
+ struct net_device *dev = bus->priv;
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
+ (location << MDIO_REG_SHIFT) | (0xffff & val)),
+ UMAC_MDIO_CMD);
+ reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
+ reg |= MDIO_START_BUSY;
+ bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
+ wait_event_timeout(priv->wq,
+ !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
+ MDIO_START_BUSY),
+ HZ / 100);
+
+ return 0;
+}
+
+/* Set up netdev link state when the PHY link status changes, and
+ * update the UMAC and RGMII blocks when the link is up
+ */
+static void bcmgenet_mii_setup(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ u32 reg, cmd_bits = 0;
+ unsigned int status_changed = 0;
+
+ if (priv->old_link != phydev->link) {
+ status_changed = 1;
+ priv->old_link = phydev->link;
+ }
+
+ if (phydev->link) {
+ /* Program the UMAC and RGMII blocks based on the established
+ * link speed, pause, and duplex.
+ * The speed set in umac->cmd tells the RGMII block which clock
+ * to use for transmit: 25MHz (100Mbps) or 125MHz (1Gbps).
+ * The receive clock is provided by the PHY.
+ */
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~OOB_DISABLE;
+ reg |= RGMII_LINK;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+
+ /* speed */
+ if (phydev->speed == SPEED_1000)
+ cmd_bits = UMAC_SPEED_1000;
+ else if (phydev->speed == SPEED_100)
+ cmd_bits = UMAC_SPEED_100;
+ else
+ cmd_bits = UMAC_SPEED_10;
+ cmd_bits <<= CMD_SPEED_SHIFT;
+
+ if (priv->old_duplex != phydev->duplex) {
+ status_changed = 1;
+ priv->old_duplex = phydev->duplex;
+ }
+
+ /* duplex */
+ if (phydev->duplex != DUPLEX_FULL)
+ cmd_bits |= CMD_HD_EN;
+
+ if (priv->old_pause != phydev->pause) {
+ status_changed = 1;
+ priv->old_pause = phydev->pause;
+ }
+
+ /* pause capability */
+ if (!phydev->pause)
+ cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+
+ reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+ reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
+ CMD_HD_EN |
+ CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
+ reg |= cmd_bits;
+ bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+ }
+
+ if (status_changed)
+ phy_print_status(phydev);
+}
+
+void bcmgenet_mii_reset(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ if (priv->phydev) {
+ phy_init_hw(priv->phydev);
+ phy_start_aneg(priv->phydev);
+ }
+}
+
+static void bcmgenet_ephy_power_up(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg = 0;
+
+ /* EXT_GPHY_CTRL is only valid for GENETv4 and onward */
+ if (!GENET_IS_V4(priv))
+ return;
+
+ reg = bcmgenet_ext_readl(priv, EXT_GPHY_CTRL);
+ reg &= ~(EXT_CFG_IDDQ_BIAS | EXT_CFG_PWR_DOWN);
+ reg |= EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ mdelay(2);
+
+ reg &= ~EXT_GPHY_RESET;
+ bcmgenet_ext_writel(priv, reg, EXT_GPHY_CTRL);
+ udelay(20);
+}
+
+static void bcmgenet_internal_phy_setup(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ u32 reg;
+
+ /* Power up EPHY */
+ bcmgenet_ephy_power_up(dev);
+ /* enable APD */
+ reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+ reg |= EXT_PWR_DN_EN_LD;
+ bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+ bcmgenet_mii_reset(dev);
+}
+
+static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
+{
+ u32 reg;
+
+ /* Speed settings are set in bcmgenet_mii_setup() */
+ reg = bcmgenet_sys_readl(priv, SYS_PORT_CTRL);
+ reg |= LED_ACT_SOURCE_MAC;
+ bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
+}
+
+int bcmgenet_mii_config(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phydev;
+ struct device *kdev = &priv->pdev->dev;
+ const char *phy_name = NULL;
+ u32 id_mode_dis = 0;
+ u32 port_ctrl;
+ u32 reg;
+
+ priv->ext_phy = !phy_is_internal(priv->phydev) &&
+ (priv->phy_interface != PHY_INTERFACE_MODE_MOCA);
+
+ if (phy_is_internal(priv->phydev))
+ priv->phy_interface = PHY_INTERFACE_MODE_NA;
+
+ switch (priv->phy_interface) {
+ case PHY_INTERFACE_MODE_NA:
+ case PHY_INTERFACE_MODE_MOCA:
+ /* Irrespective of the actually configured PHY speed (100 or
+ * 1000), GENETv4 only has an internal GPHY, so we will just end
+ * up masking the Gigabit features from what we support, not
+ * switching to the EPHY.
+ */
+ if (GENET_IS_V4(priv))
+ port_ctrl = PORT_MODE_INT_GPHY;
+ else
+ port_ctrl = PORT_MODE_INT_EPHY;
+
+ bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+
+ if (phy_is_internal(priv->phydev)) {
+ phy_name = "internal PHY";
+ bcmgenet_internal_phy_setup(dev);
+ } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+ phy_name = "MoCA";
+ bcmgenet_moca_phy_setup(priv);
+ }
+ break;
+
+ case PHY_INTERFACE_MODE_MII:
+ phy_name = "external MII";
+ phydev->supported &= PHY_BASIC_FEATURES;
+ bcmgenet_sys_writel(priv,
+ PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
+ break;
+
+ case PHY_INTERFACE_MODE_REVMII:
+ phy_name = "external RvMII";
+ /* of_mdiobus_register took care of reading the 'max-speed'
+ * PHY property for us, effectively limiting the PHY's supported
+ * capabilities; use that knowledge to also configure the
+ * Reverse MII interface correctly.
+ */
+ if ((priv->phydev->supported & PHY_BASIC_FEATURES) ==
+ PHY_BASIC_FEATURES)
+ port_ctrl = PORT_MODE_EXT_RVMII_25;
+ else
+ port_ctrl = PORT_MODE_EXT_RVMII_50;
+ bcmgenet_sys_writel(priv, port_ctrl, SYS_PORT_CTRL);
+ break;
+
+ case PHY_INTERFACE_MODE_RGMII:
+ /* RGMII_NO_ID: TXC transitions at the same time as TXD
+ * (requires PCB or receiver-side delay)
+ * RGMII: Add 2ns delay on TXC (90 degree shift)
+ *
+ * ID is implicitly disabled for 100Mbps (RG)MII operation.
+ */
+ id_mode_dis = BIT(16);
+ /* fall through */
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ if (id_mode_dis)
+ phy_name = "external RGMII (no delay)";
+ else
+ phy_name = "external RGMII (TX delay)";
+ reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg |= RGMII_MODE_EN | id_mode_dis;
+ bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+ bcmgenet_sys_writel(priv,
+ PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+ break;
+ default:
+ dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
+ return -EINVAL;
+ }
+
+ dev_info(kdev, "configuring instance for %s\n", phy_name);
+
+ return 0;
+}
+
+static int bcmgenet_mii_probe(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ struct phy_device *phydev;
+ unsigned int phy_flags;
+ int ret;
+
+ if (priv->phydev) {
+ pr_info("PHY already attached\n");
+ return 0;
+ }
+
+ if (priv->phy_dn)
+ phydev = of_phy_connect(dev, priv->phy_dn,
+ bcmgenet_mii_setup, 0,
+ priv->phy_interface);
+ else
+ phydev = of_phy_connect_fixed_link(dev,
+ bcmgenet_mii_setup,
+ priv->phy_interface);
+
+ if (!phydev) {
+ pr_err("could not attach to PHY\n");
+ return -ENODEV;
+ }
+
+ priv->old_link = -1;
+ priv->old_duplex = -1;
+ priv->old_pause = -1;
+ priv->phydev = phydev;
+
+ /* Configure the port multiplexer based on the probed PHY device, since
+ * reading the 'max-speed' property determines the maximum supported
+ * PHY speed, which is needed for bcmgenet_mii_config() to configure
+ * things appropriately.
+ */
+ ret = bcmgenet_mii_config(dev);
+ if (ret) {
+ phy_disconnect(priv->phydev);
+ return ret;
+ }
+
+ phy_flags = PHY_BRCM_100MBPS_WAR;
+
+ /* workarounds are only needed for 100Mbps PHYs, and
+ * never on GENET V1 hardware
+ */
+ if ((phydev->supported & PHY_GBIT_FEATURES) || GENET_IS_V1(priv))
+ phy_flags = 0;
+
+ phydev->dev_flags |= phy_flags;
+ phydev->advertising = phydev->supported;
+
+ /* The internal PHY has its link interrupts routed to the
+ * Ethernet MAC ISRs
+ */
+ if (phy_is_internal(priv->phydev))
+ priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
+ else
+ priv->mii_bus->irq[phydev->addr] = PHY_POLL;
+
+ pr_info("attached PHY at address %d [%s]\n",
+ phydev->addr, phydev->drv->name);
+
+ return 0;
+}
+
+static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
+{
+ struct mii_bus *bus;
+
+ if (priv->mii_bus)
+ return 0;
+
+ priv->mii_bus = mdiobus_alloc();
+ if (!priv->mii_bus) {
+ pr_err("failed to allocate\n");
+ return -ENOMEM;
+ }
+
+ bus = priv->mii_bus;
+ bus->priv = priv->dev;
+ bus->name = "bcmgenet MII bus";
+ bus->parent = &priv->pdev->dev;
+ bus->read = bcmgenet_mii_read;
+ bus->write = bcmgenet_mii_write;
+ snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
+ priv->pdev->name, priv->pdev->id);
+
+ bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+ if (!bus->irq) {
+ mdiobus_free(priv->mii_bus);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int bcmgenet_mii_of_init(struct bcmgenet_priv *priv)
+{
+ struct device_node *dn = priv->pdev->dev.of_node;
+ struct device *kdev = &priv->pdev->dev;
+ struct device_node *mdio_dn;
+ char *compat;
+ int ret;
+
+ compat = kasprintf(GFP_KERNEL, "brcm,genet-mdio-v%d", priv->version);
+ if (!compat)
+ return -ENOMEM;
+
+ mdio_dn = of_find_compatible_node(dn, NULL, compat);
+ kfree(compat);
+ if (!mdio_dn) {
+ dev_err(kdev, "unable to find MDIO bus node\n");
+ return -ENODEV;
+ }
+
+ ret = of_mdiobus_register(priv->mii_bus, mdio_dn);
+ if (ret) {
+ dev_err(kdev, "failed to register MDIO bus\n");
+ return ret;
+ }
+
+ /* Fetch the PHY phandle */
+ priv->phy_dn = of_parse_phandle(dn, "phy-handle", 0);
+
+ /* Get the link mode */
+ priv->phy_interface = of_get_phy_mode(dn);
+
+ return 0;
+}
+
+int bcmgenet_mii_init(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+ int ret;
+
+ ret = bcmgenet_mii_alloc(priv);
+ if (ret)
+ return ret;
+
+ ret = bcmgenet_mii_of_init(priv);
+ if (ret)
+ goto out_free;
+
+ ret = bcmgenet_mii_probe(dev);
+ if (ret)
+ goto out;
+
+ return 0;
+
+out:
+ mdiobus_unregister(priv->mii_bus);
+out_free:
+ kfree(priv->mii_bus->irq);
+ mdiobus_free(priv->mii_bus);
+ return ret;
+}
+
+void bcmgenet_mii_exit(struct net_device *dev)
+{
+ struct bcmgenet_priv *priv = netdev_priv(dev);
+
+ mdiobus_unregister(priv->mii_bus);
+ kfree(priv->mii_bus->irq);
+ mdiobus_free(priv->mii_bus);
+}
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3b6d0ba..e12735f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -11361,12 +11361,10 @@ static bool tg3_enable_msix(struct tg3 *tp)
msix_ent[i].vector = 0;
}
- rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
+ rc = pci_enable_msix_range(tp->pdev, msix_ent, 1, tp->irq_cnt);
if (rc < 0) {
return false;
- } else if (rc != 0) {
- if (pci_enable_msix(tp->pdev, msix_ent, rc))
- return false;
+ } else if (rc < tp->irq_cnt) {
netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
tp->irq_cnt, rc);
tp->irq_cnt = rc;
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 4ad1187..aeec9cc 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -2669,9 +2669,11 @@ bnad_enable_msix(struct bnad *bnad)
for (i = 0; i < bnad->msix_num; i++)
bnad->msix_table[i].entry = i;
- ret = pci_enable_msix(bnad->pcidev, bnad->msix_table, bnad->msix_num);
- if (ret > 0) {
- /* Not enough MSI-X vectors. */
+ ret = pci_enable_msix_range(bnad->pcidev, bnad->msix_table,
+ 1, bnad->msix_num);
+ if (ret < 0) {
+ goto intx_mode;
+ } else if (ret < bnad->msix_num) {
pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
ret, bnad->msix_num);
@@ -2684,18 +2686,11 @@ bnad_enable_msix(struct bnad *bnad)
bnad->msix_num = BNAD_NUM_TXQ + BNAD_NUM_RXP +
BNAD_MAILBOX_MSIX_VECTORS;
- if (bnad->msix_num > ret)
- goto intx_mode;
-
- /* Try once more with adjusted numbers */
- /* If this fails, fall back to INTx */
- ret = pci_enable_msix(bnad->pcidev, bnad->msix_table,
- bnad->msix_num);
- if (ret)
+ if (bnad->msix_num > ret) {
+ pci_disable_msix(bnad->pcidev);
goto intx_mode;
-
- } else if (ret < 0)
- goto intx_mode;
+ }
+ }
pci_intx(bnad->pcidev, 0);
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 45d7733..07bbb71 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -3088,30 +3088,22 @@ static int cxgb_enable_msix(struct adapter *adap)
{
struct msix_entry entries[SGE_QSETS + 1];
int vectors;
- int i, err;
+ int i;
vectors = ARRAY_SIZE(entries);
for (i = 0; i < vectors; ++i)
entries[i].entry = i;
- while ((err = pci_enable_msix(adap->pdev, entries, vectors)) > 0)
- vectors = err;
-
- if (err < 0)
- pci_disable_msix(adap->pdev);
-
- if (!err && vectors < (adap->params.nports + 1)) {
- pci_disable_msix(adap->pdev);
- err = -1;
- }
+ vectors = pci_enable_msix_range(adap->pdev, entries,
+ adap->params.nports + 1, vectors);
+ if (vectors < 0)
+ return vectors;
- if (!err) {
- for (i = 0; i < vectors; ++i)
- adap->msix_info[i].vec = entries[i].vector;
- adap->msix_nvectors = vectors;
- }
+ for (i = 0; i < vectors; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+ adap->msix_nvectors = vectors;
- return err;
+ return 0;
}
static void print_port_info(struct adapter *adap, const struct adapter_info *ai)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 1f4b9b3..944f2cb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -66,6 +66,7 @@ enum {
SERNUM_LEN = 24, /* Serial # length */
EC_LEN = 16, /* E/C length */
ID_LEN = 16, /* ID length */
+ PN_LEN = 16, /* Part Number length */
};
enum {
@@ -254,6 +255,7 @@ struct vpd_params {
u8 ec[EC_LEN + 1];
u8 sn[SERNUM_LEN + 1];
u8 id[ID_LEN + 1];
+ u8 pn[PN_LEN + 1];
};
struct pci_params {
@@ -306,6 +308,7 @@ struct adapter_params {
unsigned char bypass;
unsigned int ofldq_wr_cred;
+ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
};
#include "t4fw_api.h"
@@ -957,7 +960,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
u64 *parity);
-
+const char *t4_get_port_type_description(enum fw_port_type port_type);
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 34e2488..0ac53dd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -254,6 +254,8 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x5011, 4),
CH_DEVICE(0x5012, 4),
CH_DEVICE(0x5013, 4),
+ CH_DEVICE(0x5014, 4),
+ CH_DEVICE(0x5015, 4),
CH_DEVICE(0x5401, 4),
CH_DEVICE(0x5402, 4),
CH_DEVICE(0x5403, 4),
@@ -273,6 +275,8 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
CH_DEVICE(0x5411, 4),
CH_DEVICE(0x5412, 4),
CH_DEVICE(0x5413, 4),
+ CH_DEVICE(0x5414, 4),
+ CH_DEVICE(0x5415, 4),
{ 0, }
};
@@ -423,15 +427,18 @@ static void link_report(struct net_device *dev)
const struct port_info *p = netdev_priv(dev);
switch (p->link_cfg.speed) {
- case SPEED_10000:
+ case 10000:
s = "10Gbps";
break;
- case SPEED_1000:
+ case 1000:
s = "1000Mbps";
break;
- case SPEED_100:
+ case 100:
s = "100Mbps";
break;
+ case 40000:
+ s = "40Gbps";
+ break;
}
netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
@@ -2061,7 +2068,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x40200, 0x40298,
0x402ac, 0x4033c,
0x403f8, 0x403fc,
- 0x41300, 0x413c4,
+ 0x41304, 0x413c4,
0x41400, 0x4141c,
0x41480, 0x414d0,
0x44000, 0x44078,
@@ -2089,7 +2096,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
0x48200, 0x48298,
0x482ac, 0x4833c,
0x483f8, 0x483fc,
- 0x49300, 0x493c4,
+ 0x49304, 0x493c4,
0x49400, 0x4941c,
0x49480, 0x494d0,
0x4c000, 0x4c078,
@@ -2199,6 +2206,8 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
else if (type == FW_PORT_TYPE_FIBER_XFI ||
type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
v |= SUPPORTED_FIBRE;
+ else if (type == FW_PORT_TYPE_BP40_BA)
+ v |= SUPPORTED_40000baseSR4_Full;
if (caps & FW_PORT_CAP_ANEG)
v |= SUPPORTED_Autoneg;
@@ -2215,6 +2224,8 @@ static unsigned int to_fw_linkcaps(unsigned int caps)
v |= FW_PORT_CAP_SPEED_1G;
if (caps & ADVERTISED_10000baseT_Full)
v |= FW_PORT_CAP_SPEED_10G;
+ if (caps & ADVERTISED_40000baseSR4_Full)
+ v |= FW_PORT_CAP_SPEED_40G;
return v;
}
@@ -2263,12 +2274,14 @@ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
static unsigned int speed_to_caps(int speed)
{
- if (speed == SPEED_100)
+ if (speed == 100)
return FW_PORT_CAP_SPEED_100M;
- if (speed == SPEED_1000)
+ if (speed == 1000)
return FW_PORT_CAP_SPEED_1G;
- if (speed == SPEED_10000)
+ if (speed == 10000)
return FW_PORT_CAP_SPEED_10G;
+ if (speed == 40000)
+ return FW_PORT_CAP_SPEED_40G;
return 0;
}
@@ -2296,8 +2309,10 @@ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
if (cmd->autoneg == AUTONEG_DISABLE) {
cap = speed_to_caps(speed);
- if (!(lc->supported & cap) || (speed == SPEED_1000) ||
- (speed == SPEED_10000))
+ if (!(lc->supported & cap) ||
+ (speed == 1000) ||
+ (speed == 10000) ||
+ (speed == 40000))
return -EINVAL;
lc->requested_speed = cap;
lc->advertising = 0;
@@ -3765,6 +3780,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.dbfifo_int_thresh = dbfifo_int_thresh;
lli.sge_pktshift = adap->sge.pktshift;
lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+ lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
handle = ulds[uld].add(&lli);
if (IS_ERR(handle)) {
@@ -5370,6 +5386,21 @@ static int adap_init0(struct adapter *adap)
(void) t4_set_params(adap, adap->mbox, adap->fn, 0, 1, params, val);
/*
+ * Find out whether we're allowed to use the T5+ ULPTX MEMWRITE DSGL
+ * capability. Earlier versions of the firmware didn't have the
+ * ULPTX_MEMWRITE_DSGL so we'll interpret a query failure as no
+ * permission to use ULPTX MEMWRITE DSGL.
+ */
+ if (is_t4(adap->params.chip)) {
+ adap->params.ulptx_memwrite_dsgl = false;
+ } else {
+ params[0] = FW_PARAM_DEV(ULPTX_MEMWRITE_DSGL);
+ ret = t4_query_params(adap, adap->mbox, adap->fn, 0,
+ 1, params, val);
+ adap->params.ulptx_memwrite_dsgl = (ret == 0 && val[0] != 0);
+ }
+
+ /*
* Get device capabilities so we can determine what resources we need
* to manage.
*/
@@ -5603,9 +5634,10 @@ static const struct pci_error_handlers cxgb4_eeh = {
.resume = eeh_resume,
};
-static inline bool is_10g_port(const struct link_config *lc)
+static inline bool is_x_10g_port(const struct link_config *lc)
{
- return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
+ return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0 ||
+ (lc->supported & FW_PORT_CAP_SPEED_40G) != 0;
}
static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
@@ -5629,7 +5661,7 @@ static void cfg_queues(struct adapter *adap)
int i, q10g = 0, n10g = 0, qidx = 0;
for_each_port(adap, i)
- n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
+ n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
/*
* We default to 1 queue per non-10G port and up to # of cores queues
@@ -5644,7 +5676,7 @@ static void cfg_queues(struct adapter *adap)
struct port_info *pi = adap2pinfo(adap, i);
pi->first_qset = qidx;
- pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+ pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
qidx += pi->nqsets;
}
@@ -5737,7 +5769,7 @@ static void reduce_ethqs(struct adapter *adap, int n)
static int enable_msix(struct adapter *adap)
{
int ofld_need = 0;
- int i, err, want, need;
+ int i, want, need;
struct sge *s = &adap->sge;
unsigned int nchan = adap->params.nports;
struct msix_entry entries[MAX_INGQ + 1];
@@ -5753,32 +5785,30 @@ static int enable_msix(struct adapter *adap)
}
need = adap->params.nports + EXTRA_VECS + ofld_need;
- while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
- want = err;
+ want = pci_enable_msix_range(adap->pdev, entries, need, want);
+ if (want < 0)
+ return want;
- if (!err) {
- /*
- * Distribute available vectors to the various queue groups.
- * Every group gets its minimum requirement and NIC gets top
- * priority for leftovers.
- */
- i = want - EXTRA_VECS - ofld_need;
- if (i < s->max_ethqsets) {
- s->max_ethqsets = i;
- if (i < s->ethqsets)
- reduce_ethqs(adap, i);
- }
- if (is_offload(adap)) {
- i = want - EXTRA_VECS - s->max_ethqsets;
- i -= ofld_need - nchan;
- s->ofldqsets = (i / nchan) * nchan; /* round down */
- }
- for (i = 0; i < want; ++i)
- adap->msix_info[i].vec = entries[i].vector;
- } else if (err > 0)
- dev_info(adap->pdev_dev,
- "only %d MSI-X vectors left, not using MSI-X\n", err);
- return err;
+ /*
+ * Distribute available vectors to the various queue groups.
+ * Every group gets its minimum requirement and NIC gets top
+ * priority for leftovers.
+ */
+ i = want - EXTRA_VECS - ofld_need;
+ if (i < s->max_ethqsets) {
+ s->max_ethqsets = i;
+ if (i < s->ethqsets)
+ reduce_ethqs(adap, i);
+ }
+ if (is_offload(adap)) {
+ i = want - EXTRA_VECS - s->max_ethqsets;
+ i -= ofld_need - nchan;
+ s->ofldqsets = (i / nchan) * nchan; /* round down */
+ }
+ for (i = 0; i < want; ++i)
+ adap->msix_info[i].vec = entries[i].vector;
+
+ return 0;
}
#undef EXTRA_VECS
@@ -5801,11 +5831,6 @@ static int init_rss(struct adapter *adap)
static void print_port_info(const struct net_device *dev)
{
- static const char *base[] = {
- "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
- "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
- };
-
char buf[80];
char *bufp = buf;
const char *spd = "";
@@ -5823,9 +5848,11 @@ static void print_port_info(const struct net_device *dev)
bufp += sprintf(bufp, "1000/");
if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
bufp += sprintf(bufp, "10G/");
+ if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G)
+ bufp += sprintf(bufp, "40G/");
if (bufp != buf)
--bufp;
- sprintf(bufp, "BASE-%s", base[pi->port_type]);
+ sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type));
netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
adap->params.vpd.id,
@@ -5833,8 +5860,8 @@ static void print_port_info(const struct net_device *dev)
is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
- netdev_info(dev, "S/N: %s, E/C: %s\n",
- adap->params.vpd.sn, adap->params.vpd.ec);
+ netdev_info(dev, "S/N: %s, P/N: %s\n",
+ adap->params.vpd.sn, adap->params.vpd.pn);
}
static void enable_pcie_relaxed_ordering(struct pci_dev *dev)
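In the cxgb4_main.c hunks above, the SPEED_100/SPEED_1000/SPEED_10000 macros give way to plain Mbps integers: link_cfg.speed is treated as a speed in Mbps throughout, which lets the same comparisons cover the new 40000 (40G) case. When such a value is handed back through ethtool, the stock accessor takes the Mbps number directly; the helper below is only an illustration of that, not code from this driver.

    /* Illustrative only: reporting a speed kept as a plain Mbps value.
     * ethtool_cmd_speed_set() stores the value for us, including the
     * speed_hi half used once speeds exceed 65535 Mbps.
     */
    static void example_report_speed(struct ethtool_cmd *cmd,
                                     unsigned int speed_mbps)
    {
            if (speed_mbps)         /* e.g. 100, 1000, 10000 or 40000 */
                    ethtool_cmd_speed_set(cmd, speed_mbps);
            else
                    ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
    }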
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 4dd0a82..e274a04 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -253,6 +253,7 @@ struct cxgb4_lld_info {
/* packet data */
bool enable_fw_ofld_conn; /* Enable connection through fw */
/* WR */
+ bool ulptx_memwrite_dsgl; /* use of T5 DSGL allowed */
};
struct cxgb4_uld_info {
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 47ffa64..af76b25 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -706,11 +706,17 @@ static inline unsigned int flits_to_desc(unsigned int n)
* @skb: the packet
*
* Returns whether an Ethernet packet is small enough to fit as
- * immediate data.
+ * immediate data. Return value corresponds to headroom required.
*/
static inline int is_eth_imm(const struct sk_buff *skb)
{
- return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
+ int hdrlen = skb_shinfo(skb)->gso_size ?
+ sizeof(struct cpl_tx_pkt_lso_core) : 0;
+
+ hdrlen += sizeof(struct cpl_tx_pkt);
+ if (skb->len <= MAX_IMM_TX_PKT_LEN - hdrlen)
+ return hdrlen;
+ return 0;
}
/**
@@ -723,9 +729,10 @@ static inline int is_eth_imm(const struct sk_buff *skb)
static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
{
unsigned int flits;
+ int hdrlen = is_eth_imm(skb);
- if (is_eth_imm(skb))
- return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
+ if (hdrlen)
+ return DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64));
flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
if (skb_shinfo(skb)->gso_size)
@@ -971,6 +978,7 @@ static inline void txq_advance(struct sge_txq *q, unsigned int n)
*/
netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
{
+ int len;
u32 wr_mid;
u64 cntrl, *end;
int qidx, credits;
@@ -982,6 +990,7 @@ netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
struct cpl_tx_pkt_core *cpl;
const struct skb_shared_info *ssi;
dma_addr_t addr[MAX_SKB_FRAGS + 1];
+ bool immediate = false;
/*
* The chip min packet length is 10 octets but play safe and reject
@@ -1011,7 +1020,10 @@ out_free: dev_kfree_skb(skb);
return NETDEV_TX_BUSY;
}
- if (!is_eth_imm(skb) &&
+ if (is_eth_imm(skb))
+ immediate = true;
+
+ if (!immediate &&
unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
q->mapping_err++;
goto out_free;
@@ -1028,6 +1040,8 @@ out_free: dev_kfree_skb(skb);
wr->r3 = cpu_to_be64(0);
end = (u64 *)wr + flits;
+ len = immediate ? skb->len : 0;
+ len += sizeof(*cpl);
ssi = skb_shinfo(skb);
if (ssi->gso_size) {
struct cpl_tx_pkt_lso *lso = (void *)wr;
@@ -1035,8 +1049,9 @@ out_free: dev_kfree_skb(skb);
int l3hdr_len = skb_network_header_len(skb);
int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+ len += sizeof(*lso);
wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
- FW_WR_IMMDLEN(sizeof(*lso)));
+ FW_WR_IMMDLEN(len));
lso->c.lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
LSO_FIRST_SLICE | LSO_LAST_SLICE |
LSO_IPV6(v6) |
@@ -1054,9 +1069,6 @@ out_free: dev_kfree_skb(skb);
q->tso++;
q->tx_cso += ssi->gso_segs;
} else {
- int len;
-
- len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
FW_WR_IMMDLEN(len));
cpl = (void *)(wr + 1);
@@ -1078,7 +1090,7 @@ out_free: dev_kfree_skb(skb);
cpl->len = htons(skb->len);
cpl->ctrl1 = cpu_to_be64(cntrl);
- if (is_eth_imm(skb)) {
+ if (immediate) {
inline_tx_skb(skb, &q->q, cpl + 1);
dev_kfree_skb(skb);
} else {
@@ -1467,8 +1479,12 @@ static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
{
unsigned int idx = skb_txq(skb);
- if (unlikely(is_ctrl_pkt(skb)))
+ if (unlikely(is_ctrl_pkt(skb))) {
+ /* Single ctrl queue is a requirement for LE workaround path */
+ if (adap->tids.nsftids)
+ idx = 0;
return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
+ }
return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
}
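In the sge.c hunks above, is_eth_imm() changes from a yes/no answer into the number of CPL header bytes that ride along with an immediate packet (cpl_tx_pkt, plus cpl_tx_pkt_lso_core when the skb is GSO), or 0 when the packet has to go out as a gather list. calc_tx_flits() then sizes the immediate case as DIV_ROUND_UP(skb->len + hdrlen, sizeof(__be64)), one flit being one __be64. The fragment below only restates that arithmetic with a made-up header size: with hdrlen = 16, a 60-byte packet needs DIV_ROUND_UP(76, 8) = 10 flits.

    /* Illustrative restatement of the immediate-data flit count; the
     * 16-byte header length used in the comment above is hypothetical.
     */
    static unsigned int example_imm_flits(unsigned int pkt_len,
                                          unsigned int hdrlen)
    {
            if (!hdrlen)            /* not immediate: SGL path, not shown */
                    return 0;
            return DIV_ROUND_UP(pkt_len + hdrlen, sizeof(__be64));
    }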
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 2c10934..d3c2a51 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -573,7 +573,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
{
u32 cclk_param, cclk_val;
int i, ret, addr;
- int ec, sn;
+ int ec, sn, pn;
u8 *vpd, csum;
unsigned int vpdr_len, kw_offset, id_len;
@@ -638,6 +638,7 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
FIND_VPD_KW(ec, "EC");
FIND_VPD_KW(sn, "SN");
+ FIND_VPD_KW(pn, "PN");
#undef FIND_VPD_KW
memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
@@ -647,6 +648,8 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
strim(p->sn);
+ memcpy(p->pn, vpd + pn, min(i, PN_LEN));
+ strim(p->pn);
/*
* Ask firmware for the Core Clock since it knows how to translate the
@@ -1155,7 +1158,8 @@ out:
}
#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
- FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_ANEG)
/**
* t4_link_start - apply link configuration to MAC/PHY
@@ -2247,6 +2251,36 @@ static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
}
/**
+ * t4_get_port_type_description - return Port Type string description
+ * @port_type: firmware Port Type enumeration
+ */
+const char *t4_get_port_type_description(enum fw_port_type port_type)
+{
+ static const char *const port_type_description[] = {
+ "R XFI",
+ "R XAUI",
+ "T SGMII",
+ "T XFI",
+ "T XAUI",
+ "KX4",
+ "CX4",
+ "KX",
+ "KR",
+ "R SFP+",
+ "KR/KX",
+ "KR/KX/KX4",
+ "R QSFP_10G",
+ "",
+ "R QSFP",
+ "R BP40_BA",
+ };
+
+ if (port_type < ARRAY_SIZE(port_type_description))
+ return port_type_description[port_type];
+ return "UNKNOWN";
+}
+
+/**
* t4_get_port_stats - collect port statistics
* @adap: the adapter
* @idx: the port index
@@ -3533,11 +3567,13 @@ int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
if (stat & FW_PORT_CMD_TXPAUSE)
fc |= PAUSE_TX;
if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
- speed = SPEED_100;
+ speed = 100;
else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
- speed = SPEED_1000;
+ speed = 1000;
else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
- speed = SPEED_10000;
+ speed = 10000;
+ else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_40G))
+ speed = 40000;
if (link_ok != lc->link_ok || speed != lc->speed ||
fc != lc->fc) { /* something changed */
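get_vpd_params() above now extracts the Part Number ("PN") keyword alongside EC and SN; FIND_VPD_KW resolves each keyword's offset within the VPD read-only section. For readers unfamiliar with that layer, the sketch below shows the same lookup done directly with the generic PCI VPD helpers; the function name and buffers are illustrative, not Chelsio code.

    /* Illustrative lookup of one VPD read-only keyword with the generic
     * PCI helpers; error handling and buffer sizing are simplified.
     */
    static int example_read_vpd_pn(const u8 *vpd, unsigned int ro_off,
                                   unsigned int ro_len, char *out, size_t len)
    {
            int kw = pci_vpd_find_info_keyword(vpd, ro_off, ro_len, "PN");
            size_t n;

            if (kw < 0)
                    return kw;

            n = min_t(size_t, pci_vpd_info_field_size(&vpd[kw]), len - 1);
            memcpy(out, &vpd[kw + PCI_VPD_INFO_FLD_HDR_SIZE], n);
            out[n] = '\0';
            return 0;
    }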
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 74fea74..9cc973f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -932,6 +932,7 @@ enum fw_params_param_dev {
FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
FW_PARAMS_PARAM_DEV_CF = 0x0D,
+ FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
};
/*
@@ -1742,6 +1743,9 @@ enum fw_port_type {
FW_PORT_TYPE_SFP,
FW_PORT_TYPE_BP_AP,
FW_PORT_TYPE_BP4_AP,
+ FW_PORT_TYPE_QSFP_10G,
+ FW_PORT_TYPE_QSFP,
+ FW_PORT_TYPE_BP40_BA,
FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
};
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 0899c09..1d0fe9b 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2444,7 +2444,7 @@ static void reduce_ethqs(struct adapter *adapter, int n)
*/
static int enable_msix(struct adapter *adapter)
{
- int i, err, want, need;
+ int i, want, need, nqsets;
struct msix_entry entries[MSIX_ENTRIES];
struct sge *s = &adapter->sge;
@@ -2460,26 +2460,23 @@ static int enable_msix(struct adapter *adapter)
*/
want = s->max_ethqsets + MSIX_EXTRAS;
need = adapter->params.nports + MSIX_EXTRAS;
- while ((err = pci_enable_msix(adapter->pdev, entries, want)) >= need)
- want = err;
- if (err == 0) {
- int nqsets = want - MSIX_EXTRAS;
- if (nqsets < s->max_ethqsets) {
- dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
- " for %d Queue Sets\n", nqsets);
- s->max_ethqsets = nqsets;
- if (nqsets < s->ethqsets)
- reduce_ethqs(adapter, nqsets);
- }
- for (i = 0; i < want; ++i)
- adapter->msix_info[i].vec = entries[i].vector;
- } else if (err > 0) {
- pci_disable_msix(adapter->pdev);
- dev_info(adapter->pdev_dev, "only %d MSI-X vectors left,"
- " not using MSI-X\n", err);
+ want = pci_enable_msix_range(adapter->pdev, entries, need, want);
+ if (want < 0)
+ return want;
+
+ nqsets = want - MSIX_EXTRAS;
+ if (nqsets < s->max_ethqsets) {
+ dev_warn(adapter->pdev_dev, "only enough MSI-X vectors"
+ " for %d Queue Sets\n", nqsets);
+ s->max_ethqsets = nqsets;
+ if (nqsets < s->ethqsets)
+ reduce_ethqs(adapter, nqsets);
}
- return err;
+ for (i = 0; i < want; ++i)
+ adapter->msix_info[i].vec = entries[i].vector;
+
+ return 0;
}
static const struct net_device_ops cxgb4vf_netdev_ops = {
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index b740bfc..dcd58f2 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1796,7 +1796,8 @@ static int enic_set_intr_mode(struct enic *enic)
enic->cq_count >= n + m &&
enic->intr_count >= n + m + 2) {
- if (!pci_enable_msix(enic->pdev, enic->msix_entry, n + m + 2)) {
+ if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
+ n + m + 2, n + m + 2) > 0) {
enic->rq_count = n;
enic->wq_count = m;
@@ -1815,7 +1816,8 @@ static int enic_set_intr_mode(struct enic *enic)
enic->wq_count >= m &&
enic->cq_count >= 1 + m &&
enic->intr_count >= 1 + m + 2) {
- if (!pci_enable_msix(enic->pdev, enic->msix_entry, 1 + m + 2)) {
+ if (pci_enable_msix_range(enic->pdev, enic->msix_entry,
+ 1 + m + 2, 1 + m + 2) > 0) {
enic->rq_count = 1;
enic->wq_count = m;
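The enic conversion above is a variation on the same theme: by passing the same value as both the minimum and the maximum, pci_enable_msix_range() becomes an all-or-nothing request, so a positive return means exactly that many vectors were granted, and the driver falls through to the next, smaller interrupt configuration otherwise. A small illustration with an invented helper name:

    /* Illustrative all-or-nothing MSI-X request: min == max. */
    static bool example_exact_msix(struct pci_dev *pdev,
                                   struct msix_entry *tbl, int n)
    {
            return pci_enable_msix_range(pdev, tbl, n, n) > 0;
    }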
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 05529e2..a91267b 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -88,7 +88,6 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE_MIN_MTU 256
#define BE_NUM_VLANS_SUPPORTED 64
-#define BE_UMC_NUM_VLANS_SUPPORTED 15
#define BE_MAX_EQD 128u
#define BE_MAX_TX_FRAG_COUNT 30
@@ -262,9 +261,10 @@ struct be_tx_obj {
/* Struct to remember the pages posted for rx frags */
struct be_rx_page_info {
struct page *page;
+ /* set to page-addr for last frag of the page & frag-addr otherwise */
DEFINE_DMA_UNMAP_ADDR(bus);
u16 page_offset;
- bool last_page_user;
+ bool last_frag; /* last frag of the page */
};
struct be_rx_stats {
@@ -293,7 +293,7 @@ struct be_rx_compl_info {
u8 ip_csum;
u8 l4_csum;
u8 ipv6;
- u8 vtm;
+ u8 qnq;
u8 pkt_type;
u8 ip_frag;
};
@@ -467,6 +467,7 @@ struct be_adapter {
u32 port_num;
bool promiscuous;
+ u8 mc_type;
u32 function_mode;
u32 function_caps;
u32 rx_fc; /* Rx flow control */
@@ -536,6 +537,14 @@ static inline u16 be_max_qs(struct be_adapter *adapter)
return min_t(u16, num, num_online_cpus());
}
+/* Is BE in pvid_tagging mode */
+#define be_pvid_tagging_enabled(adapter) (adapter->pvid)
+
+/* Is BE in QNQ multi-channel mode */
+#define be_is_qnq_mode(adapter) (adapter->mc_type == FLEX10 || \
+ adapter->mc_type == vNIC1 || \
+ adapter->mc_type == UFP)
+
#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
adapter->pdev->device == OC_DEVICE_ID4)
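The be.h changes above rename last_page_user to last_frag and document that the stored DMA address is the page address only for a page's final fragment, and the fragment address otherwise. The be_main.c hunks further down build on that: a compound RX page is DMA-mapped once, each rx_frag_size slice is merely synced for the CPU when its completion arrives, and the page is unmapped when its last fragment comes back. The sketch below shows that generic map-once/sync-per-frag pattern with the standard DMA API; the structure and function names are illustrative, not the driver's.

    /* Illustrative map-once / sync-per-frag completion handling. */
    struct example_rx_frag {
            dma_addr_t dma;         /* frag address, or page address for
                                     * the last frag of the page */
            bool last_frag;
    };

    static void example_frag_complete(struct device *dev,
                                      struct example_rx_frag *frag,
                                      size_t frag_size, size_t page_size)
    {
            if (frag->last_frag)
                    dma_unmap_page(dev, frag->dma, page_size, DMA_FROM_DEVICE);
            else
                    dma_sync_single_for_cpu(dev, frag->dma, frag_size,
                                            DMA_FROM_DEVICE);
    }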
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 48076a6..72bde5d 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -3296,6 +3296,21 @@ static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
return NULL;
}
+static struct be_port_res_desc *be_get_port_desc(u8 *buf, u32 desc_count)
+{
+ struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+ int i;
+
+ for (i = 0; i < desc_count; i++) {
+ if (hdr->desc_type == PORT_RESOURCE_DESC_TYPE_V1)
+ return (struct be_port_res_desc *)hdr;
+
+ hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
+ hdr = (void *)hdr + hdr->desc_len;
+ }
+ return NULL;
+}
+
static void be_copy_nic_desc(struct be_resources *res,
struct be_nic_res_desc *desc)
{
@@ -3439,6 +3454,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
{
struct be_cmd_resp_get_profile_config *resp;
struct be_pcie_res_desc *pcie;
+ struct be_port_res_desc *port;
struct be_nic_res_desc *nic;
struct be_queue_info *mccq = &adapter->mcc_obj.q;
struct be_dma_mem cmd;
@@ -3466,6 +3482,10 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
if (pcie)
res->max_vfs = le16_to_cpu(pcie->num_vfs);
+ port = be_get_port_desc(resp->func_param, desc_count);
+ if (port)
+ adapter->mc_type = port->mc_type;
+
nic = be_get_nic_desc(resp->func_param, desc_count);
if (nic)
be_copy_nic_desc(res, nic);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index fc4e076..d0ab980 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -1098,14 +1098,6 @@ struct be_cmd_resp_query_fw_cfg {
u32 function_caps;
};
-/* Is BE in a multi-channel mode */
-static inline bool be_is_mc(struct be_adapter *adapter)
-{
- return adapter->function_mode & FLEX10_MODE ||
- adapter->function_mode & VNIC_MODE ||
- adapter->function_mode & UMC_ENABLED;
-}
-
/******************** RSS Config ****************************************/
/* RSS type Input parameters used to compute RX hash
* RSS_ENABLE_IPV4 SRC IPv4, DST IPv4
@@ -1828,6 +1820,7 @@ struct be_cmd_req_set_ext_fat_caps {
#define NIC_RESOURCE_DESC_TYPE_V0 0x41
#define PCIE_RESOURCE_DESC_TYPE_V1 0x50
#define NIC_RESOURCE_DESC_TYPE_V1 0x51
+#define PORT_RESOURCE_DESC_TYPE_V1 0x55
#define MAX_RESOURCE_DESC 264
/* QOS unit number */
@@ -1891,6 +1884,33 @@ struct be_nic_res_desc {
u32 rsvd8[7];
} __packed;
+/************ Multi-Channel type ***********/
+enum mc_type {
+ MC_NONE = 0x01,
+ UMC = 0x02,
+ FLEX10 = 0x03,
+ vNIC1 = 0x04,
+ nPAR = 0x05,
+ UFP = 0x06,
+ vNIC2 = 0x07
+};
+
+struct be_port_res_desc {
+ struct be_res_desc_hdr hdr;
+ u8 rsvd0;
+ u8 flags;
+ u8 rsvd1;
+ u8 mc_type;
+ u16 rsvd2;
+ u32 rsvd3[20];
+} __packed;
+
+/* Is BE in a multi-channel mode */
+static inline bool be_is_mc(struct be_adapter *adapter)
+{
+ return adapter->mc_type > MC_NONE;
+}
+
struct be_cmd_req_get_func_config {
struct be_cmd_req_hdr hdr;
};
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index 05be007..cf09d8f 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index dc88782..28ac8dd 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -368,7 +368,7 @@ struct amap_eth_rx_compl_v0 {
u8 numfrags[3]; /* dword 1 */
u8 rss_flush; /* dword 2 */
u8 cast_enc[2]; /* dword 2 */
- u8 vtm; /* dword 2 */
+ u8 qnq; /* dword 2 */
u8 rss_bank; /* dword 2 */
u8 rsvd1[23]; /* dword 2 */
u8 lro_pkt; /* dword 2 */
@@ -401,7 +401,7 @@ struct amap_eth_rx_compl_v1 {
u8 numfrags[3]; /* dword 1 */
u8 rss_flush; /* dword 2 */
u8 cast_enc[2]; /* dword 2 */
- u8 vtm; /* dword 2 */
+ u8 qnq; /* dword 2 */
u8 rss_bank; /* dword 2 */
u8 port[2]; /* dword 2 */
u8 vntagp; /* dword 2 */
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 36c8061..6e10230 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
@@ -935,9 +935,9 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
}
/* If vlan tag is already inlined in the packet, skip HW VLAN
- * tagging in UMC mode
+ * tagging in pvid-tagging mode
*/
- if ((adapter->function_mode & UMC_ENABLED) &&
+ if (be_pvid_tagging_enabled(adapter) &&
veh->h_vlan_proto == htons(ETH_P_8021Q))
*skip_hw_vlan = true;
@@ -1464,11 +1464,15 @@ static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
rx_page_info = &rxo->page_info_tbl[frag_idx];
BUG_ON(!rx_page_info->page);
- if (rx_page_info->last_page_user) {
+ if (rx_page_info->last_frag) {
dma_unmap_page(&adapter->pdev->dev,
dma_unmap_addr(rx_page_info, bus),
adapter->big_page_size, DMA_FROM_DEVICE);
- rx_page_info->last_page_user = false;
+ rx_page_info->last_frag = false;
+ } else {
+ dma_sync_single_for_cpu(&adapter->pdev->dev,
+ dma_unmap_addr(rx_page_info, bus),
+ rx_frag_size, DMA_FROM_DEVICE);
}
queue_tail_inc(rxq);
@@ -1676,7 +1680,7 @@ static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
rxcp->rss_hash =
AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
if (rxcp->vlanf) {
- rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
+ rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
compl);
rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
compl);
@@ -1706,7 +1710,7 @@ static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
rxcp->rss_hash =
AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
if (rxcp->vlanf) {
- rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
+ rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
compl);
rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
compl);
@@ -1739,9 +1743,11 @@ static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
rxcp->l4_csum = 0;
if (rxcp->vlanf) {
- /* vlanf could be wrongly set in some cards.
- * ignore if vtm is not set */
- if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
+ /* In QNQ modes, if qnq bit is not set, then the packet was
+ * tagged only with the transparent outer vlan-tag and must
+ * not be treated as a vlan packet by host
+ */
+ if (be_is_qnq_mode(adapter) && !rxcp->qnq)
rxcp->vlanf = 0;
if (!lancer_chip(adapter))
@@ -1800,17 +1806,16 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
rx_stats(rxo)->rx_post_fail++;
break;
}
- page_info->page_offset = 0;
+ page_offset = 0;
} else {
get_page(pagep);
- page_info->page_offset = page_offset + rx_frag_size;
+ page_offset += rx_frag_size;
}
- page_offset = page_info->page_offset;
+ page_info->page_offset = page_offset;
page_info->page = pagep;
- dma_unmap_addr_set(page_info, bus, page_dmaaddr);
- frag_dmaaddr = page_dmaaddr + page_info->page_offset;
rxd = queue_head_node(rxq);
+ frag_dmaaddr = page_dmaaddr + page_info->page_offset;
rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
@@ -1818,15 +1823,24 @@ static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
if ((page_offset + rx_frag_size + rx_frag_size) >
adapter->big_page_size) {
pagep = NULL;
- page_info->last_page_user = true;
+ page_info->last_frag = true;
+ dma_unmap_addr_set(page_info, bus, page_dmaaddr);
+ } else {
+ dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
}
prev_page_info = page_info;
queue_head_inc(rxq);
page_info = &rxo->page_info_tbl[rxq->head];
}
- if (pagep)
- prev_page_info->last_page_user = true;
+
+ /* Mark the last frag of a page when we break out of the above loop
+ * with no more slots available in the RXQ
+ */
+ if (pagep) {
+ prev_page_info->last_frag = true;
+ dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
+ }
if (posted) {
atomic_add(posted, &rxq->used);
@@ -2439,6 +2453,9 @@ void be_detect_error(struct be_adapter *adapter)
u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
u32 i;
+ bool error_detected = false;
+ struct device *dev = &adapter->pdev->dev;
+ struct net_device *netdev = adapter->netdev;
if (be_hw_error(adapter))
return;
@@ -2450,6 +2467,21 @@ void be_detect_error(struct be_adapter *adapter)
SLIPORT_ERROR1_OFFSET);
sliport_err2 = ioread32(adapter->db +
SLIPORT_ERROR2_OFFSET);
+ adapter->hw_error = true;
+ /* Do not log error messages if its a FW reset */
+ if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
+ sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
+ dev_info(dev, "Firmware update in progress\n");
+ } else {
+ error_detected = true;
+ dev_err(dev, "Error detected in the card\n");
+ dev_err(dev, "ERR: sliport status 0x%x\n",
+ sliport_status);
+ dev_err(dev, "ERR: sliport error1 0x%x\n",
+ sliport_err1);
+ dev_err(dev, "ERR: sliport error2 0x%x\n",
+ sliport_err2);
+ }
}
} else {
pci_read_config_dword(adapter->pdev,
@@ -2463,51 +2495,33 @@ void be_detect_error(struct be_adapter *adapter)
ue_lo = (ue_lo & ~ue_lo_mask);
ue_hi = (ue_hi & ~ue_hi_mask);
- }
- /* On certain platforms BE hardware can indicate spurious UEs.
- * Allow the h/w to stop working completely in case of a real UE.
- * Hence not setting the hw_error for UE detection.
- */
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- adapter->hw_error = true;
- /* Do not log error messages if its a FW reset */
- if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
- sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
- dev_info(&adapter->pdev->dev,
- "Firmware update in progress\n");
- return;
- } else {
- dev_err(&adapter->pdev->dev,
- "Error detected in the card\n");
- }
- }
-
- if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
- dev_err(&adapter->pdev->dev,
- "ERR: sliport status 0x%x\n", sliport_status);
- dev_err(&adapter->pdev->dev,
- "ERR: sliport error1 0x%x\n", sliport_err1);
- dev_err(&adapter->pdev->dev,
- "ERR: sliport error2 0x%x\n", sliport_err2);
- }
-
- if (ue_lo) {
- for (i = 0; ue_lo; ue_lo >>= 1, i++) {
- if (ue_lo & 1)
- dev_err(&adapter->pdev->dev,
- "UE: %s bit set\n", ue_status_low_desc[i]);
- }
- }
+ /* On certain platforms BE hardware can indicate spurious UEs.
+ * Allow HW to stop working completely in case of a real UE.
+ * Hence not setting the hw_error for UE detection.
+ */
- if (ue_hi) {
- for (i = 0; ue_hi; ue_hi >>= 1, i++) {
- if (ue_hi & 1)
- dev_err(&adapter->pdev->dev,
- "UE: %s bit set\n", ue_status_hi_desc[i]);
+ if (ue_lo || ue_hi) {
+ error_detected = true;
+ dev_err(dev,
+ "Unrecoverable Error detected in the adapter");
+ dev_err(dev, "Please reboot server to recover");
+ if (skyhawk_chip(adapter))
+ adapter->hw_error = true;
+ for (i = 0; ue_lo; ue_lo >>= 1, i++) {
+ if (ue_lo & 1)
+ dev_err(dev, "UE: %s bit set\n",
+ ue_status_low_desc[i]);
+ }
+ for (i = 0; ue_hi; ue_hi >>= 1, i++) {
+ if (ue_hi & 1)
+ dev_err(dev, "UE: %s bit set\n",
+ ue_status_hi_desc[i]);
+ }
}
}
-
+ if (error_detected)
+ netif_carrier_off(netdev);
}
static void be_msix_disable(struct be_adapter *adapter)
@@ -2521,7 +2535,7 @@ static void be_msix_disable(struct be_adapter *adapter)
static int be_msix_enable(struct be_adapter *adapter)
{
- int i, status, num_vec;
+ int i, num_vec;
struct device *dev = &adapter->pdev->dev;
/* If RoCE is supported, program the max number of NIC vectors that
@@ -2537,24 +2551,11 @@ static int be_msix_enable(struct be_adapter *adapter)
for (i = 0; i < num_vec; i++)
adapter->msix_entries[i].entry = i;
- status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
- if (status == 0) {
- goto done;
- } else if (status >= MIN_MSIX_VECTORS) {
- num_vec = status;
- status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- num_vec);
- if (!status)
- goto done;
- }
+ num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ MIN_MSIX_VECTORS, num_vec);
+ if (num_vec < 0)
+ goto fail;
- dev_warn(dev, "MSIx enable failed\n");
-
- /* INTx is not supported in VFs, so fail probe if enable_msix fails */
- if (!be_physfn(adapter))
- return status;
- return 0;
-done:
if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
adapter->num_msix_roce_vec = num_vec / 2;
dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
@@ -2566,6 +2567,14 @@ done:
dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
adapter->num_msix_vec);
return 0;
+
+fail:
+ dev_warn(dev, "MSIx enable failed\n");
+
+ /* INTx is not supported in VFs, so fail probe if enable_msix fails */
+ if (!be_physfn(adapter))
+ return num_vec;
+ return 0;
}
static inline int be_msix_vec_get(struct be_adapter *adapter,
@@ -3119,6 +3128,22 @@ err:
return status;
}
+/* Converting function_mode bits on BE3 to SH mc_type enums */
+
+static u8 be_convert_mc_type(u32 function_mode)
+{
+ if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
+ return vNIC1;
+ else if (function_mode & FLEX10_MODE)
+ return FLEX10;
+ else if (function_mode & VNIC_MODE)
+ return vNIC2;
+ else if (function_mode & UMC_ENABLED)
+ return UMC;
+ else
+ return MC_NONE;
+}
+
/* On BE2/BE3 FW does not suggest the supported limits */
static void BEx_get_resources(struct be_adapter *adapter,
struct be_resources *res)
@@ -3139,12 +3164,23 @@ static void BEx_get_resources(struct be_adapter *adapter,
else
res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
- if (adapter->function_mode & FLEX10_MODE)
- res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
- else if (adapter->function_mode & UMC_ENABLED)
- res->max_vlans = BE_UMC_NUM_VLANS_SUPPORTED;
- else
+ adapter->mc_type = be_convert_mc_type(adapter->function_mode);
+
+ if (be_is_mc(adapter)) {
+ /* Assuming that there are 4 channels per port,
+ * when multi-channel is enabled
+ */
+ if (be_is_qnq_mode(adapter))
+ res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
+ else
+ /* In a non-qnq multichannel mode, the pvid
+ * takes up one vlan entry
+ */
+ res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
+ } else {
res->max_vlans = BE_NUM_VLANS_SUPPORTED;
+ }
+
res->max_mcast_mac = BE_MAX_MC;
/* For BE3 1Gb ports, F/W does not properly support multiple TXQs */
@@ -4427,14 +4463,32 @@ static bool be_reset_required(struct be_adapter *adapter)
static char *mc_name(struct be_adapter *adapter)
{
- if (adapter->function_mode & FLEX10_MODE)
- return "FLEX10";
- else if (adapter->function_mode & VNIC_MODE)
- return "vNIC";
- else if (adapter->function_mode & UMC_ENABLED)
- return "UMC";
- else
- return "";
+ char *str = ""; /* default */
+
+ switch (adapter->mc_type) {
+ case UMC:
+ str = "UMC";
+ break;
+ case FLEX10:
+ str = "FLEX10";
+ break;
+ case vNIC1:
+ str = "vNIC-1";
+ break;
+ case nPAR:
+ str = "nPAR";
+ break;
+ case UFP:
+ str = "UFP";
+ break;
+ case vNIC2:
+ str = "vNIC-2";
+ break;
+ default:
+ str = "";
+ }
+
+ return str;
}
static inline char *func_name(struct be_adapter *adapter)
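With the mc_type plumbing above in place, BEx_get_resources() sizes the VLAN table from the multi-channel mode instead of from raw function_mode flags. Since BE_NUM_VLANS_SUPPORTED is 64, the three cases work out to 64, 64/8 = 8 (QNQ multi-channel) and 64/4 - 1 = 15 (non-QNQ multi-channel, with the pvid taking one entry); the last value matches the BE_UMC_NUM_VLANS_SUPPORTED define that be.h dropped. A worked restatement, with an invented helper name:

    /* Worked restatement of the max_vlans cases; names are illustrative. */
    static u16 example_max_vlans(bool multi_channel, bool qnq)
    {
            if (!multi_channel)
                    return 64;              /* BE_NUM_VLANS_SUPPORTED */
            if (qnq)
                    return 64 / 8;          /* 8 entries in QNQ mode */
            return 64 / 4 - 1;              /* 15: pvid uses one entry */
    }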
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.c b/drivers/net/ethernet/emulex/benet/be_roce.c
index 9cd5415..a5dae4a 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.c
+++ b/drivers/net/ethernet/emulex/benet/be_roce.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/emulex/benet/be_roce.h b/drivers/net/ethernet/emulex/benet/be_roce.h
index 2cd1129..a3ef8f8 100644
--- a/drivers/net/ethernet/emulex/benet/be_roce.h
+++ b/drivers/net/ethernet/emulex/benet/be_roce.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
* All rights reserved.
*
* This program is free software; you can redistribute it and/or
diff --git a/drivers/net/ethernet/freescale/Makefile b/drivers/net/ethernet/freescale/Makefile
index 549ce13..71debd1 100644
--- a/drivers/net/ethernet/freescale/Makefile
+++ b/drivers/net/ethernet/freescale/Makefile
@@ -14,7 +14,6 @@ obj-$(CONFIG_FSL_XGMAC_MDIO) += xgmac_mdio.o
obj-$(CONFIG_GIANFAR) += gianfar_driver.o
obj-$(CONFIG_PTP_1588_CLOCK_GIANFAR) += gianfar_ptp.o
gianfar_driver-objs := gianfar.o \
- gianfar_ethtool.o \
- gianfar_sysfs.o
+ gianfar_ethtool.o
obj-$(CONFIG_UCC_GETH) += ucc_geth_driver.o
ucc_geth_driver-objs := ucc_geth.o ucc_geth_ethtool.o
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ad5a5aa..c5b9320 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
* Copyright 2007 MontaVista Software, Inc.
*
* This program is free software; you can redistribute it and/or modify it
@@ -121,7 +121,6 @@ static irqreturn_t gfar_error(int irq, void *dev_id);
static irqreturn_t gfar_transmit(int irq, void *dev_id);
static irqreturn_t gfar_interrupt(int irq, void *dev_id);
static void adjust_link(struct net_device *dev);
-static void init_registers(struct net_device *dev);
static int init_phy(struct net_device *dev);
static int gfar_probe(struct platform_device *ofdev);
static int gfar_remove(struct platform_device *ofdev);
@@ -138,9 +137,7 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
static void gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
int amount_pull, struct napi_struct *napi);
-void gfar_halt(struct net_device *dev);
-static void gfar_halt_nodisable(struct net_device *dev);
-void gfar_start(struct net_device *dev);
+static void gfar_halt_nodisable(struct gfar_private *priv);
static void gfar_clear_exact_match(struct net_device *dev);
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
const u8 *addr);
@@ -332,23 +329,35 @@ static void gfar_init_tx_rx_base(struct gfar_private *priv)
}
}
-static void gfar_init_mac(struct net_device *ndev)
+static void gfar_rx_buff_size_config(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(ndev);
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u32 rctrl = 0;
- u32 tctrl = 0;
- u32 attrs = 0;
-
- /* write the tx/rx base registers */
- gfar_init_tx_rx_base(priv);
-
- /* Configure the coalescing support */
- gfar_configure_coalescing_all(priv);
+ int frame_size = priv->ndev->mtu + ETH_HLEN;
/* set this when rx hw offload (TOE) functions are being used */
priv->uses_rxfcb = 0;
+ if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
+ priv->uses_rxfcb = 1;
+
+ if (priv->hwts_rx_en)
+ priv->uses_rxfcb = 1;
+
+ if (priv->uses_rxfcb)
+ frame_size += GMAC_FCB_LEN;
+
+ frame_size += priv->padding;
+
+ frame_size = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
+ INCREMENTAL_BUFFER_SIZE;
+
+ priv->rx_buffer_size = frame_size;
+}
+
+static void gfar_mac_rx_config(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 rctrl = 0;
+
if (priv->rx_filer_enable) {
rctrl |= RCTRL_FILREN;
/* Program the RIR0 reg with the required distribution */
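gfar_rx_buff_size_config() above rounds the computed frame size up with the mask-and-add idiom (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) + INCREMENTAL_BUFFER_SIZE. Assuming INCREMENTAL_BUFFER_SIZE is 512 purely for illustration, a plain 1500-byte MTU with no Rx FCB or padding gives 1500 + 14 = 1514, and (1514 & ~511) + 512 = 1536; an exact multiple is also bumped up by a full step (1536 would become 2048).

    /* Worked restatement of the buffer-size round-up; 512 stands in for
     * INCREMENTAL_BUFFER_SIZE here purely for illustration.
     */
    static unsigned int example_rx_buffer_size(unsigned int frame_size)
    {
            const unsigned int inc = 512;

            return (frame_size & ~(inc - 1)) + inc;   /* 1514 -> 1536 */
    }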
@@ -356,48 +365,37 @@ static void gfar_init_mac(struct net_device *ndev)
}
/* Restore PROMISC mode */
- if (ndev->flags & IFF_PROMISC)
+ if (priv->ndev->flags & IFF_PROMISC)
rctrl |= RCTRL_PROM;
- if (ndev->features & NETIF_F_RXCSUM) {
+ if (priv->ndev->features & NETIF_F_RXCSUM)
rctrl |= RCTRL_CHECKSUMMING;
- priv->uses_rxfcb = 1;
- }
- if (priv->extended_hash) {
- rctrl |= RCTRL_EXTHASH;
-
- gfar_clear_exact_match(ndev);
- rctrl |= RCTRL_EMEN;
- }
+ if (priv->extended_hash)
+ rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;
if (priv->padding) {
rctrl &= ~RCTRL_PAL_MASK;
rctrl |= RCTRL_PADDING(priv->padding);
}
- /* Insert receive time stamps into padding alignment bytes */
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER) {
- rctrl &= ~RCTRL_PAL_MASK;
- rctrl |= RCTRL_PADDING(8);
- priv->padding = 8;
- }
-
/* Enable HW time stamping if requested from user space */
- if (priv->hwts_rx_en) {
+ if (priv->hwts_rx_en)
rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;
- priv->uses_rxfcb = 1;
- }
- if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX) {
+ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
- priv->uses_rxfcb = 1;
- }
/* Init rctrl based on our settings */
gfar_write(&regs->rctrl, rctrl);
+}
- if (ndev->features & NETIF_F_IP_CSUM)
+static void gfar_mac_tx_config(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 tctrl = 0;
+
+ if (priv->ndev->features & NETIF_F_IP_CSUM)
tctrl |= TCTRL_INIT_CSUM;
if (priv->prio_sched_en)
@@ -408,30 +406,51 @@ static void gfar_init_mac(struct net_device *ndev)
gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
}
- gfar_write(&regs->tctrl, tctrl);
+ if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
+ tctrl |= TCTRL_VLINS;
- /* Set the extraction length and index */
- attrs = ATTRELI_EL(priv->rx_stash_size) |
- ATTRELI_EI(priv->rx_stash_index);
+ gfar_write(&regs->tctrl, tctrl);
+}
- gfar_write(&regs->attreli, attrs);
+static void gfar_configure_coalescing(struct gfar_private *priv,
+ unsigned long tx_mask, unsigned long rx_mask)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr;
- /* Start with defaults, and add stashing or locking
- * depending on the approprate variables
- */
- attrs = ATTR_INIT_SETTINGS;
+ if (priv->mode == MQ_MG_MODE) {
+ int i = 0;
- if (priv->bd_stash_en)
- attrs |= ATTR_BDSTASH;
+ baddr = &regs->txic0;
+ for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
+ gfar_write(baddr + i, 0);
+ if (likely(priv->tx_queue[i]->txcoalescing))
+ gfar_write(baddr + i, priv->tx_queue[i]->txic);
+ }
- if (priv->rx_stash_size != 0)
- attrs |= ATTR_BUFSTASH;
+ baddr = &regs->rxic0;
+ for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
+ gfar_write(baddr + i, 0);
+ if (likely(priv->rx_queue[i]->rxcoalescing))
+ gfar_write(baddr + i, priv->rx_queue[i]->rxic);
+ }
+ } else {
+ /* Backward compatible case -- even if we enable
+ * multiple queues, there's only single reg to program
+ */
+ gfar_write(&regs->txic, 0);
+ if (likely(priv->tx_queue[0]->txcoalescing))
+ gfar_write(&regs->txic, priv->tx_queue[0]->txic);
- gfar_write(&regs->attr, attrs);
+ gfar_write(&regs->rxic, 0);
+ if (unlikely(priv->rx_queue[0]->rxcoalescing))
+ gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
+ }
+}
- gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
- gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
- gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
+void gfar_configure_coalescing_all(struct gfar_private *priv)
+{
+ gfar_configure_coalescing(priv, 0xFF, 0xFF);
}
static struct net_device_stats *gfar_get_stats(struct net_device *dev)
@@ -479,12 +498,27 @@ static const struct net_device_ops gfar_netdev_ops = {
#endif
};
-void lock_rx_qs(struct gfar_private *priv)
+static void gfar_ints_disable(struct gfar_private *priv)
{
int i;
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar __iomem *regs = priv->gfargrp[i].regs;
+ /* Clear IEVENT */
+ gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
- for (i = 0; i < priv->num_rx_queues; i++)
- spin_lock(&priv->rx_queue[i]->rxlock);
+ /* Initialize IMASK */
+ gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+ }
+}
+
+static void gfar_ints_enable(struct gfar_private *priv)
+{
+ int i;
+ for (i = 0; i < priv->num_grps; i++) {
+ struct gfar __iomem *regs = priv->gfargrp[i].regs;
+ /* Unmask the interrupts we look for */
+ gfar_write(&regs->imask, IMASK_DEFAULT);
+ }
}
void lock_tx_qs(struct gfar_private *priv)
@@ -495,23 +529,50 @@ void lock_tx_qs(struct gfar_private *priv)
spin_lock(&priv->tx_queue[i]->txlock);
}
-void unlock_rx_qs(struct gfar_private *priv)
+void unlock_tx_qs(struct gfar_private *priv)
{
int i;
- for (i = 0; i < priv->num_rx_queues; i++)
- spin_unlock(&priv->rx_queue[i]->rxlock);
+ for (i = 0; i < priv->num_tx_queues; i++)
+ spin_unlock(&priv->tx_queue[i]->txlock);
}
-void unlock_tx_qs(struct gfar_private *priv)
+static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
int i;
- for (i = 0; i < priv->num_tx_queues; i++)
- spin_unlock(&priv->tx_queue[i]->txlock);
+ for (i = 0; i < priv->num_tx_queues; i++) {
+ priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
+ GFP_KERNEL);
+ if (!priv->tx_queue[i])
+ return -ENOMEM;
+
+ priv->tx_queue[i]->tx_skbuff = NULL;
+ priv->tx_queue[i]->qindex = i;
+ priv->tx_queue[i]->dev = priv->ndev;
+ spin_lock_init(&(priv->tx_queue[i]->txlock));
+ }
+ return 0;
+}
+
+static int gfar_alloc_rx_queues(struct gfar_private *priv)
+{
+ int i;
+
+ for (i = 0; i < priv->num_rx_queues; i++) {
+ priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
+ GFP_KERNEL);
+ if (!priv->rx_queue[i])
+ return -ENOMEM;
+
+ priv->rx_queue[i]->rx_skbuff = NULL;
+ priv->rx_queue[i]->qindex = i;
+ priv->rx_queue[i]->dev = priv->ndev;
+ }
+ return 0;
}
-static void free_tx_pointers(struct gfar_private *priv)
+static void gfar_free_tx_queues(struct gfar_private *priv)
{
int i;
@@ -519,7 +580,7 @@ static void free_tx_pointers(struct gfar_private *priv)
kfree(priv->tx_queue[i]);
}
-static void free_rx_pointers(struct gfar_private *priv)
+static void gfar_free_rx_queues(struct gfar_private *priv)
{
int i;
@@ -608,6 +669,30 @@ static int gfar_parse_group(struct device_node *np,
grp->rx_bit_map = 0xFF;
grp->tx_bit_map = 0xFF;
}
+
+ /* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
+ * right to left, so we need to revert the 8 bits to get the q index
+ */
+ grp->rx_bit_map = bitrev8(grp->rx_bit_map);
+ grp->tx_bit_map = bitrev8(grp->tx_bit_map);
+
+ /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
+ * also assign queues to groups
+ */
+ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+ grp->num_rx_queues++;
+ grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
+ priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
+ priv->rx_queue[i]->grp = grp;
+ }
+
+ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+ grp->num_tx_queues++;
+ grp->tstat |= (TSTAT_CLEAR_THALT >> i);
+ priv->tqueue |= (TQUEUE_EN0 >> i);
+ priv->tx_queue[i]->grp = grp;
+ }
+
priv->num_grps++;
return 0;
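gfar_parse_group() above now mirrors rx_bit_map and tx_bit_map with bitrev8() because, as the new comment says, the hardware bitmaps carry q0 in the most significant bit while for_each_set_bit() walks upward from bit 0. A quick illustration of what that buys (values and function name invented):

    /* Illustrative only: a group bitmap with q0 and q1 enabled. */
    static void example_group_bitmap(void)
    {
            unsigned long map = bitrev8(0xC0);      /* MSB-first 0xC0 -> 0x03 */
            int q;

            for_each_set_bit(q, &map, 8)
                    pr_info("queue %d belongs to this group\n", q);
    }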
@@ -664,7 +749,14 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
priv->num_tx_queues = num_tx_qs;
netif_set_real_num_rx_queues(dev, num_rx_qs);
priv->num_rx_queues = num_rx_qs;
- priv->num_grps = 0x0;
+
+ err = gfar_alloc_tx_queues(priv);
+ if (err)
+ goto tx_alloc_failed;
+
+ err = gfar_alloc_rx_queues(priv);
+ if (err)
+ goto rx_alloc_failed;
/* Init Rx queue filer rule set linked list */
INIT_LIST_HEAD(&priv->rx_list.list);
@@ -691,38 +783,6 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
goto err_grp_init;
}
- for (i = 0; i < priv->num_tx_queues; i++)
- priv->tx_queue[i] = NULL;
- for (i = 0; i < priv->num_rx_queues; i++)
- priv->rx_queue[i] = NULL;
-
- for (i = 0; i < priv->num_tx_queues; i++) {
- priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
- GFP_KERNEL);
- if (!priv->tx_queue[i]) {
- err = -ENOMEM;
- goto tx_alloc_failed;
- }
- priv->tx_queue[i]->tx_skbuff = NULL;
- priv->tx_queue[i]->qindex = i;
- priv->tx_queue[i]->dev = dev;
- spin_lock_init(&(priv->tx_queue[i]->txlock));
- }
-
- for (i = 0; i < priv->num_rx_queues; i++) {
- priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
- GFP_KERNEL);
- if (!priv->rx_queue[i]) {
- err = -ENOMEM;
- goto rx_alloc_failed;
- }
- priv->rx_queue[i]->rx_skbuff = NULL;
- priv->rx_queue[i]->qindex = i;
- priv->rx_queue[i]->dev = dev;
- spin_lock_init(&(priv->rx_queue[i]->rxlock));
- }
-
-
stash = of_get_property(np, "bd-stash", NULL);
if (stash) {
@@ -749,17 +809,16 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
memcpy(dev->dev_addr, mac_addr, ETH_ALEN);
if (model && !strcasecmp(model, "TSEC"))
- priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
FSL_GIANFAR_DEV_HAS_COALESCE |
FSL_GIANFAR_DEV_HAS_RMON |
FSL_GIANFAR_DEV_HAS_MULTI_INTR;
if (model && !strcasecmp(model, "eTSEC"))
- priv->device_flags = FSL_GIANFAR_DEV_HAS_GIGABIT |
+ priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
FSL_GIANFAR_DEV_HAS_COALESCE |
FSL_GIANFAR_DEV_HAS_RMON |
FSL_GIANFAR_DEV_HAS_MULTI_INTR |
- FSL_GIANFAR_DEV_HAS_PADDING |
FSL_GIANFAR_DEV_HAS_CSUM |
FSL_GIANFAR_DEV_HAS_VLAN |
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
@@ -784,12 +843,12 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
return 0;
-rx_alloc_failed:
- free_rx_pointers(priv);
-tx_alloc_failed:
- free_tx_pointers(priv);
err_grp_init:
unmap_group_regs(priv);
+rx_alloc_failed:
+ gfar_free_rx_queues(priv);
+tx_alloc_failed:
+ gfar_free_tx_queues(priv);
free_gfar_dev(priv);
return err;
}
@@ -822,18 +881,16 @@ static int gfar_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
if (priv->hwts_rx_en) {
- stop_gfar(netdev);
priv->hwts_rx_en = 0;
- startup_gfar(netdev);
+ reset_gfar(netdev);
}
break;
default:
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
return -ERANGE;
if (!priv->hwts_rx_en) {
- stop_gfar(netdev);
priv->hwts_rx_en = 1;
- startup_gfar(netdev);
+ reset_gfar(netdev);
}
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
@@ -875,19 +932,6 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
return phy_mii_ioctl(priv->phydev, rq, cmd);
}
-static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
-{
- unsigned int new_bit_map = 0x0;
- int mask = 0x1 << (max_qs - 1), i;
-
- for (i = 0; i < max_qs; i++) {
- if (bit_map & mask)
- new_bit_map = new_bit_map + (1 << i);
- mask = mask >> 0x1;
- }
- return new_bit_map;
-}
-
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
u32 class)
{
@@ -1005,99 +1049,140 @@ static void gfar_detect_errata(struct gfar_private *priv)
priv->errata);
}
-/* Set up the ethernet device structure, private data,
- * and anything else we need before we start
- */
-static int gfar_probe(struct platform_device *ofdev)
+void gfar_mac_reset(struct gfar_private *priv)
{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- struct net_device *dev = NULL;
- struct gfar_private *priv = NULL;
- struct gfar __iomem *regs = NULL;
- int err = 0, i, grp_idx = 0;
- u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
- u32 isrg = 0;
- u32 __iomem *baddr;
-
- err = gfar_of_init(ofdev, &dev);
-
- if (err)
- return err;
-
- priv = netdev_priv(dev);
- priv->ndev = dev;
- priv->ofdev = ofdev;
- priv->dev = &ofdev->dev;
- SET_NETDEV_DEV(dev, &ofdev->dev);
-
- spin_lock_init(&priv->bflock);
- INIT_WORK(&priv->reset_task, gfar_reset_task);
-
- platform_set_drvdata(ofdev, priv);
- regs = priv->gfargrp[0].regs;
-
- gfar_detect_errata(priv);
-
- /* Stop the DMA engine now, in case it was running before
- * (The firmware could have used it, and left it running).
- */
- gfar_halt(dev);
/* Reset MAC layer */
gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
/* We need to delay at least 3 TX clocks */
- udelay(2);
+ udelay(3);
- tempval = 0;
- if (!priv->pause_aneg_en && priv->tx_pause_en)
- tempval |= MACCFG1_TX_FLOW;
- if (!priv->pause_aneg_en && priv->rx_pause_en)
- tempval |= MACCFG1_RX_FLOW;
/* the soft reset bit is not self-resetting, so we need to
* clear it before resuming normal operation
*/
- gfar_write(&regs->maccfg1, tempval);
+ gfar_write(&regs->maccfg1, 0);
+
+ udelay(3);
+
+ /* Compute rx_buff_size based on config flags */
+ gfar_rx_buff_size_config(priv);
+
+ /* Initialize the max receive frame/buffer lengths */
+ gfar_write(&regs->maxfrm, priv->rx_buffer_size);
+ gfar_write(&regs->mrblr, priv->rx_buffer_size);
+
+ /* Initialize the Minimum Frame Length Register */
+ gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
/* Initialize MACCFG2. */
tempval = MACCFG2_INIT_SETTINGS;
- if (gfar_has_errata(priv, GFAR_ERRATA_74))
+
+ /* If the mtu is larger than the max size for standard
+ * ethernet frames (ie, a jumbo frame), then set maccfg2
+ * to allow huge frames, and to check the length
+ */
+ if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
+ gfar_has_errata(priv, GFAR_ERRATA_74))
tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
+
gfar_write(&regs->maccfg2, tempval);
+ /* Clear mac addr hash registers */
+ gfar_write(&regs->igaddr0, 0);
+ gfar_write(&regs->igaddr1, 0);
+ gfar_write(&regs->igaddr2, 0);
+ gfar_write(&regs->igaddr3, 0);
+ gfar_write(&regs->igaddr4, 0);
+ gfar_write(&regs->igaddr5, 0);
+ gfar_write(&regs->igaddr6, 0);
+ gfar_write(&regs->igaddr7, 0);
+
+ gfar_write(&regs->gaddr0, 0);
+ gfar_write(&regs->gaddr1, 0);
+ gfar_write(&regs->gaddr2, 0);
+ gfar_write(&regs->gaddr3, 0);
+ gfar_write(&regs->gaddr4, 0);
+ gfar_write(&regs->gaddr5, 0);
+ gfar_write(&regs->gaddr6, 0);
+ gfar_write(&regs->gaddr7, 0);
+
+ if (priv->extended_hash)
+ gfar_clear_exact_match(priv->ndev);
+
+ gfar_mac_rx_config(priv);
+
+ gfar_mac_tx_config(priv);
+
+ gfar_set_mac_address(priv->ndev);
+
+ gfar_set_multi(priv->ndev);
+
+ /* clear ievent and imask before configuring coalescing */
+ gfar_ints_disable(priv);
+
+ /* Configure the coalescing support */
+ gfar_configure_coalescing_all(priv);
+}
+
+static void gfar_hw_init(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 attrs;
+
+ /* Stop the DMA engine now, in case it was running before
+ * (The firmware could have used it, and left it running).
+ */
+ gfar_halt(priv);
+
+ gfar_mac_reset(priv);
+
+ /* Zero out the rmon mib registers if it has them */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
+ memset_io(&(regs->rmon), 0, sizeof(struct rmon_mib));
+
+ /* Mask off the CAM interrupts */
+ gfar_write(&regs->rmon.cam1, 0xffffffff);
+ gfar_write(&regs->rmon.cam2, 0xffffffff);
+ }
+
/* Initialize ECNTRL */
gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
- /* Set the dev->base_addr to the gfar reg region */
- dev->base_addr = (unsigned long) regs;
+ /* Set the extraction length and index */
+ attrs = ATTRELI_EL(priv->rx_stash_size) |
+ ATTRELI_EI(priv->rx_stash_index);
- /* Fill in the dev structure */
- dev->watchdog_timeo = TX_TIMEOUT;
- dev->mtu = 1500;
- dev->netdev_ops = &gfar_netdev_ops;
- dev->ethtool_ops = &gfar_ethtool_ops;
+ gfar_write(&regs->attreli, attrs);
- /* Register for napi ...We are registering NAPI for each grp */
- if (priv->mode == SQ_SG_MODE)
- netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
- GFAR_DEV_WEIGHT);
- else
- for (i = 0; i < priv->num_grps; i++)
- netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
- GFAR_DEV_WEIGHT);
+ /* Start with defaults, and add stashing
+ * depending on driver parameters
+ */
+ attrs = ATTR_INIT_SETTINGS;
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
- dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM;
- dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
- NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
- }
+ if (priv->bd_stash_en)
+ attrs |= ATTR_BDSTASH;
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
- dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
- NETIF_F_HW_VLAN_CTAG_RX;
- dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
- }
+ if (priv->rx_stash_size != 0)
+ attrs |= ATTR_BUFSTASH;
+
+ gfar_write(&regs->attr, attrs);
+
+ /* FIFO configs */
+ gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
+ gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
+ gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
+
+ /* Program the interrupt steering regs, only for MG devices */
+ if (priv->num_grps > 1)
+ gfar_write_isrg(priv);
+}
+
+static void __init gfar_init_addr_hash_table(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
priv->extended_hash = 1;
@@ -1133,68 +1218,75 @@ static int gfar_probe(struct platform_device *ofdev)
priv->hash_regs[6] = &regs->gaddr6;
priv->hash_regs[7] = &regs->gaddr7;
}
+}
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
- priv->padding = DEFAULT_PADDING;
- else
- priv->padding = 0;
+/* Set up the ethernet device structure, private data,
+ * and anything else we need before we start
+ */
+static int gfar_probe(struct platform_device *ofdev)
+{
+ struct net_device *dev = NULL;
+ struct gfar_private *priv = NULL;
+ int err = 0, i;
- if (dev->features & NETIF_F_IP_CSUM ||
- priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
- dev->needed_headroom = GMAC_FCB_LEN;
+ err = gfar_of_init(ofdev, &dev);
- /* Program the isrg regs only if number of grps > 1 */
- if (priv->num_grps > 1) {
- baddr = &regs->isrg0;
- for (i = 0; i < priv->num_grps; i++) {
- isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
- isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
- gfar_write(baddr, isrg);
- baddr++;
- isrg = 0x0;
- }
- }
+ if (err)
+ return err;
- /* Need to reverse the bit maps as bit_map's MSB is q0
- * but, for_each_set_bit parses from right to left, which
- * basically reverses the queue numbers
- */
- for (i = 0; i< priv->num_grps; i++) {
- priv->gfargrp[i].tx_bit_map =
- reverse_bitmap(priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
- priv->gfargrp[i].rx_bit_map =
- reverse_bitmap(priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
+ priv = netdev_priv(dev);
+ priv->ndev = dev;
+ priv->ofdev = ofdev;
+ priv->dev = &ofdev->dev;
+ SET_NETDEV_DEV(dev, &ofdev->dev);
+
+ spin_lock_init(&priv->bflock);
+ INIT_WORK(&priv->reset_task, gfar_reset_task);
+
+ platform_set_drvdata(ofdev, priv);
+
+ gfar_detect_errata(priv);
+
+ /* Set the dev->base_addr to the gfar reg region */
+ dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
+
+ /* Fill in the dev structure */
+ dev->watchdog_timeo = TX_TIMEOUT;
+ dev->mtu = 1500;
+ dev->netdev_ops = &gfar_netdev_ops;
+ dev->ethtool_ops = &gfar_ethtool_ops;
+
+ /* Register for NAPI. We register one NAPI context per group */
+ if (priv->mode == SQ_SG_MODE)
+ netif_napi_add(dev, &priv->gfargrp[0].napi, gfar_poll_sq,
+ GFAR_DEV_WEIGHT);
+ else
+ for (i = 0; i < priv->num_grps; i++)
+ netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll,
+ GFAR_DEV_WEIGHT);
+
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
+ dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM;
+ dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
+ NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
}
- /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
- * also assign queues to groups
- */
- for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
- priv->gfargrp[grp_idx].num_rx_queues = 0x0;
-
- for_each_set_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
- priv->num_rx_queues) {
- priv->gfargrp[grp_idx].num_rx_queues++;
- priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
- rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
- rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
- }
- priv->gfargrp[grp_idx].num_tx_queues = 0x0;
-
- for_each_set_bit(i, &priv->gfargrp[grp_idx].tx_bit_map,
- priv->num_tx_queues) {
- priv->gfargrp[grp_idx].num_tx_queues++;
- priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
- tstat = tstat | (TSTAT_CLEAR_THALT >> i);
- tqueue = tqueue | (TQUEUE_EN0 >> i);
- }
- priv->gfargrp[grp_idx].rstat = rstat;
- priv->gfargrp[grp_idx].tstat = tstat;
- rstat = tstat =0;
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
+ dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
+ dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
}
- gfar_write(&regs->rqueue, rqueue);
- gfar_write(&regs->tqueue, tqueue);
+ gfar_init_addr_hash_table(priv);
+
+ /* Insert receive time stamps into padding alignment bytes */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ priv->padding = 8;
+
+ if (dev->features & NETIF_F_IP_CSUM ||
+ priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
+ dev->needed_headroom = GMAC_FCB_LEN;
priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
@@ -1220,8 +1312,9 @@ static int gfar_probe(struct platform_device *ofdev)
if (priv->num_tx_queues == 1)
priv->prio_sched_en = 1;
- /* Carrier starts down, phylib will bring it up */
- netif_carrier_off(dev);
+ set_bit(GFAR_DOWN, &priv->state);
+
+ gfar_hw_init(priv);
err = register_netdev(dev);
@@ -1230,6 +1323,9 @@ static int gfar_probe(struct platform_device *ofdev)
goto register_fail;
}
+ /* Carrier starts down, phylib will bring it up */
+ netif_carrier_off(dev);
+
device_init_wakeup(&dev->dev,
priv->device_flags &
FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
@@ -1251,9 +1347,6 @@ static int gfar_probe(struct platform_device *ofdev)
/* Initialize the filer table */
gfar_init_filer_table(priv);
- /* Create all the sysfs files */
- gfar_init_sysfs(dev);
-
/* Print out the device info */
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
@@ -1272,8 +1365,8 @@ static int gfar_probe(struct platform_device *ofdev)
register_fail:
unmap_group_regs(priv);
- free_tx_pointers(priv);
- free_rx_pointers(priv);
+ gfar_free_rx_queues(priv);
+ gfar_free_tx_queues(priv);
if (priv->phy_node)
of_node_put(priv->phy_node);
if (priv->tbi_node)
@@ -1293,6 +1386,8 @@ static int gfar_remove(struct platform_device *ofdev)
unregister_netdev(priv->ndev);
unmap_group_regs(priv);
+ gfar_free_rx_queues(priv);
+ gfar_free_tx_queues(priv);
free_gfar_dev(priv);
return 0;
@@ -1318,9 +1413,8 @@ static int gfar_suspend(struct device *dev)
local_irq_save(flags);
lock_tx_qs(priv);
- lock_rx_qs(priv);
- gfar_halt_nodisable(ndev);
+ gfar_halt_nodisable(priv);
/* Disable Tx, and Rx if wake-on-LAN is disabled. */
tempval = gfar_read(&regs->maccfg1);
@@ -1332,7 +1426,6 @@ static int gfar_suspend(struct device *dev)
gfar_write(&regs->maccfg1, tempval);
- unlock_rx_qs(priv);
unlock_tx_qs(priv);
local_irq_restore(flags);
@@ -1378,15 +1471,13 @@ static int gfar_resume(struct device *dev)
*/
local_irq_save(flags);
lock_tx_qs(priv);
- lock_rx_qs(priv);
tempval = gfar_read(&regs->maccfg2);
tempval &= ~MACCFG2_MPEN;
gfar_write(&regs->maccfg2, tempval);
- gfar_start(ndev);
+ gfar_start(priv);
- unlock_rx_qs(priv);
unlock_tx_qs(priv);
local_irq_restore(flags);
@@ -1413,10 +1504,11 @@ static int gfar_restore(struct device *dev)
return -ENOMEM;
}
- init_registers(ndev);
- gfar_set_mac_address(ndev);
- gfar_init_mac(ndev);
- gfar_start(ndev);
+ gfar_mac_reset(priv);
+
+ gfar_init_tx_rx_base(priv);
+
+ gfar_start(priv);
priv->oldlink = 0;
priv->oldspeed = 0;
@@ -1574,57 +1666,6 @@ static void gfar_configure_serdes(struct net_device *dev)
BMCR_SPEED1000);
}
-static void init_registers(struct net_device *dev)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = NULL;
- int i;
-
- for (i = 0; i < priv->num_grps; i++) {
- regs = priv->gfargrp[i].regs;
- /* Clear IEVENT */
- gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
-
- /* Initialize IMASK */
- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
- }
-
- regs = priv->gfargrp[0].regs;
- /* Init hash registers to zero */
- gfar_write(&regs->igaddr0, 0);
- gfar_write(&regs->igaddr1, 0);
- gfar_write(&regs->igaddr2, 0);
- gfar_write(&regs->igaddr3, 0);
- gfar_write(&regs->igaddr4, 0);
- gfar_write(&regs->igaddr5, 0);
- gfar_write(&regs->igaddr6, 0);
- gfar_write(&regs->igaddr7, 0);
-
- gfar_write(&regs->gaddr0, 0);
- gfar_write(&regs->gaddr1, 0);
- gfar_write(&regs->gaddr2, 0);
- gfar_write(&regs->gaddr3, 0);
- gfar_write(&regs->gaddr4, 0);
- gfar_write(&regs->gaddr5, 0);
- gfar_write(&regs->gaddr6, 0);
- gfar_write(&regs->gaddr7, 0);
-
- /* Zero out the rmon mib registers if it has them */
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
- memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
-
- /* Mask off the CAM interrupts */
- gfar_write(&regs->rmon.cam1, 0xffffffff);
- gfar_write(&regs->rmon.cam2, 0xffffffff);
- }
-
- /* Initialize the max receive buffer length */
- gfar_write(&regs->mrblr, priv->rx_buffer_size);
-
- /* Initialize the Minimum Frame Length Register */
- gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
-}
-
static int __gfar_is_rx_idle(struct gfar_private *priv)
{
u32 res;
@@ -1648,23 +1689,13 @@ static int __gfar_is_rx_idle(struct gfar_private *priv)
}
/* Halt the receive and transmit queues */
-static void gfar_halt_nodisable(struct net_device *dev)
+static void gfar_halt_nodisable(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = NULL;
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- int i;
- for (i = 0; i < priv->num_grps; i++) {
- regs = priv->gfargrp[i].regs;
- /* Mask all interrupts */
- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+ gfar_ints_disable(priv);
- /* Clear all interrupts */
- gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
- }
-
- regs = priv->gfargrp[0].regs;
/* Stop the DMA, and wait for it to stop */
tempval = gfar_read(&regs->dmactrl);
if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) !=
@@ -1685,56 +1716,41 @@ static void gfar_halt_nodisable(struct net_device *dev)
}
/* Halt the receive and transmit queues */
-void gfar_halt(struct net_device *dev)
+void gfar_halt(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
- gfar_halt_nodisable(dev);
+ /* Disable the Rx/Tx hw queues */
+ gfar_write(&regs->rqueue, 0);
+ gfar_write(&regs->tqueue, 0);
- /* Disable Rx and Tx */
+ mdelay(10);
+
+ gfar_halt_nodisable(priv);
+
+ /* Disable Rx/Tx DMA */
tempval = gfar_read(&regs->maccfg1);
tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
gfar_write(&regs->maccfg1, tempval);
}
-static void free_grp_irqs(struct gfar_priv_grp *grp)
-{
- free_irq(gfar_irq(grp, TX)->irq, grp);
- free_irq(gfar_irq(grp, RX)->irq, grp);
- free_irq(gfar_irq(grp, ER)->irq, grp);
-}
-
void stop_gfar(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- unsigned long flags;
- int i;
-
- phy_stop(priv->phydev);
+ netif_tx_stop_all_queues(dev);
- /* Lock it down */
- local_irq_save(flags);
- lock_tx_qs(priv);
- lock_rx_qs(priv);
+ smp_mb__before_clear_bit();
+ set_bit(GFAR_DOWN, &priv->state);
+ smp_mb__after_clear_bit();
- gfar_halt(dev);
+ disable_napi(priv);
- unlock_rx_qs(priv);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
+ /* disable ints and gracefully shut down Rx/Tx DMA */
+ gfar_halt(priv);
- /* Free the IRQs */
- if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
- for (i = 0; i < priv->num_grps; i++)
- free_grp_irqs(&priv->gfargrp[i]);
- } else {
- for (i = 0; i < priv->num_grps; i++)
- free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
- &priv->gfargrp[i]);
- }
+ phy_stop(priv->phydev);
free_skb_resources(priv);
}
@@ -1825,17 +1841,15 @@ static void free_skb_resources(struct gfar_private *priv)
priv->tx_queue[0]->tx_bd_dma_base);
}
-void gfar_start(struct net_device *dev)
+void gfar_start(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
u32 tempval;
int i = 0;
- /* Enable Rx and Tx in MACCFG1 */
- tempval = gfar_read(&regs->maccfg1);
- tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
- gfar_write(&regs->maccfg1, tempval);
+ /* Enable Rx/Tx hw queues */
+ gfar_write(&regs->rqueue, priv->rqueue);
+ gfar_write(&regs->tqueue, priv->tqueue);
/* Initialize DMACTRL to have WWR and WOP */
tempval = gfar_read(&regs->dmactrl);
@@ -1852,52 +1866,23 @@ void gfar_start(struct net_device *dev)
/* Clear THLT/RHLT, so that the DMA starts polling now */
gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
- /* Unmask the interrupts we look for */
- gfar_write(&regs->imask, IMASK_DEFAULT);
}
- dev->trans_start = jiffies; /* prevent tx timeout */
-}
-
-static void gfar_configure_coalescing(struct gfar_private *priv,
- unsigned long tx_mask, unsigned long rx_mask)
-{
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- u32 __iomem *baddr;
-
- if (priv->mode == MQ_MG_MODE) {
- int i = 0;
-
- baddr = &regs->txic0;
- for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
- gfar_write(baddr + i, 0);
- if (likely(priv->tx_queue[i]->txcoalescing))
- gfar_write(baddr + i, priv->tx_queue[i]->txic);
- }
+ /* Enable Rx/Tx DMA */
+ tempval = gfar_read(&regs->maccfg1);
+ tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
+ gfar_write(&regs->maccfg1, tempval);
- baddr = &regs->rxic0;
- for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
- gfar_write(baddr + i, 0);
- if (likely(priv->rx_queue[i]->rxcoalescing))
- gfar_write(baddr + i, priv->rx_queue[i]->rxic);
- }
- } else {
- /* Backward compatible case -- even if we enable
- * multiple queues, there's only single reg to program
- */
- gfar_write(&regs->txic, 0);
- if (likely(priv->tx_queue[0]->txcoalescing))
- gfar_write(&regs->txic, priv->tx_queue[0]->txic);
+ gfar_ints_enable(priv);
- gfar_write(&regs->rxic, 0);
- if (unlikely(priv->rx_queue[0]->rxcoalescing))
- gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
- }
+ priv->ndev->trans_start = jiffies; /* prevent tx timeout */
}
-void gfar_configure_coalescing_all(struct gfar_private *priv)
+static void free_grp_irqs(struct gfar_priv_grp *grp)
{
- gfar_configure_coalescing(priv, 0xFF, 0xFF);
+ free_irq(gfar_irq(grp, TX)->irq, grp);
+ free_irq(gfar_irq(grp, RX)->irq, grp);
+ free_irq(gfar_irq(grp, ER)->irq, grp);
}
static int register_grp_irqs(struct gfar_priv_grp *grp)
@@ -1956,46 +1941,65 @@ err_irq_fail:
}
-/* Bring the controller up and running */
-int startup_gfar(struct net_device *ndev)
+static void gfar_free_irq(struct gfar_private *priv)
{
- struct gfar_private *priv = netdev_priv(ndev);
- struct gfar __iomem *regs = NULL;
- int err, i, j;
+ int i;
- for (i = 0; i < priv->num_grps; i++) {
- regs= priv->gfargrp[i].regs;
- gfar_write(&regs->imask, IMASK_INIT_CLEAR);
+ /* Free the IRQs */
+ if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
+ for (i = 0; i < priv->num_grps; i++)
+ free_grp_irqs(&priv->gfargrp[i]);
+ } else {
+ for (i = 0; i < priv->num_grps; i++)
+ free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
+ &priv->gfargrp[i]);
}
+}
- regs= priv->gfargrp[0].regs;
- err = gfar_alloc_skb_resources(ndev);
- if (err)
- return err;
-
- gfar_init_mac(ndev);
+static int gfar_request_irq(struct gfar_private *priv)
+{
+ int err, i, j;
for (i = 0; i < priv->num_grps; i++) {
err = register_grp_irqs(&priv->gfargrp[i]);
if (err) {
for (j = 0; j < i; j++)
free_grp_irqs(&priv->gfargrp[j]);
- goto irq_fail;
+ return err;
}
}
- /* Start the controller */
- gfar_start(ndev);
+ return 0;
+}
+
+/* Bring the controller up and running */
+int startup_gfar(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
+ int err;
+
+ gfar_mac_reset(priv);
+
+ err = gfar_alloc_skb_resources(ndev);
+ if (err)
+ return err;
+
+ gfar_init_tx_rx_base(priv);
+
+ smp_mb__before_clear_bit();
+ clear_bit(GFAR_DOWN, &priv->state);
+ smp_mb__after_clear_bit();
+
+ /* Start Rx/Tx DMA and enable the interrupts */
+ gfar_start(priv);
phy_start(priv->phydev);
- gfar_configure_coalescing_all(priv);
+ enable_napi(priv);
- return 0;
+ netif_tx_wake_all_queues(ndev);
-irq_fail:
- free_skb_resources(priv);
- return err;
+ return 0;
}
/* Called when something needs to use the ethernet device
@@ -2006,27 +2010,17 @@ static int gfar_enet_open(struct net_device *dev)
struct gfar_private *priv = netdev_priv(dev);
int err;
- enable_napi(priv);
-
- /* Initialize a bunch of registers */
- init_registers(dev);
-
- gfar_set_mac_address(dev);
-
err = init_phy(dev);
+ if (err)
+ return err;
- if (err) {
- disable_napi(priv);
+ err = gfar_request_irq(priv);
+ if (err)
return err;
- }
err = startup_gfar(dev);
- if (err) {
- disable_napi(priv);
+ if (err)
return err;
- }
-
- netif_tx_start_all_queues(dev);
device_set_wakeup_enable(&dev->dev, priv->wol_en);
@@ -2351,8 +2345,6 @@ static int gfar_close(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
- disable_napi(priv);
-
cancel_work_sync(&priv->reset_task);
stop_gfar(dev);
@@ -2360,7 +2352,7 @@ static int gfar_close(struct net_device *dev)
phy_disconnect(priv->phydev);
priv->phydev = NULL;
- netif_tx_stop_all_queues(dev);
+ gfar_free_irq(priv);
return 0;
}
@@ -2373,77 +2365,9 @@ static int gfar_set_mac_address(struct net_device *dev)
return 0;
}
-/* Check if rx parser should be activated */
-void gfar_check_rx_parser_mode(struct gfar_private *priv)
-{
- struct gfar __iomem *regs;
- u32 tempval;
-
- regs = priv->gfargrp[0].regs;
-
- tempval = gfar_read(&regs->rctrl);
- /* If parse is no longer required, then disable parser */
- if (tempval & RCTRL_REQ_PARSER) {
- tempval |= RCTRL_PRSDEP_INIT;
- priv->uses_rxfcb = 1;
- } else {
- tempval &= ~RCTRL_PRSDEP_INIT;
- priv->uses_rxfcb = 0;
- }
- gfar_write(&regs->rctrl, tempval);
-}
-
-/* Enables and disables VLAN insertion/extraction */
-void gfar_vlan_mode(struct net_device *dev, netdev_features_t features)
-{
- struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = NULL;
- unsigned long flags;
- u32 tempval;
-
- regs = priv->gfargrp[0].regs;
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- if (features & NETIF_F_HW_VLAN_CTAG_TX) {
- /* Enable VLAN tag insertion */
- tempval = gfar_read(&regs->tctrl);
- tempval |= TCTRL_VLINS;
- gfar_write(&regs->tctrl, tempval);
- } else {
- /* Disable VLAN tag insertion */
- tempval = gfar_read(&regs->tctrl);
- tempval &= ~TCTRL_VLINS;
- gfar_write(&regs->tctrl, tempval);
- }
-
- if (features & NETIF_F_HW_VLAN_CTAG_RX) {
- /* Enable VLAN tag extraction */
- tempval = gfar_read(&regs->rctrl);
- tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
- gfar_write(&regs->rctrl, tempval);
- priv->uses_rxfcb = 1;
- } else {
- /* Disable VLAN tag extraction */
- tempval = gfar_read(&regs->rctrl);
- tempval &= ~RCTRL_VLEX;
- gfar_write(&regs->rctrl, tempval);
-
- gfar_check_rx_parser_mode(priv);
- }
-
- gfar_change_mtu(dev, dev->mtu);
-
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-}
-
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
{
- int tempsize, tempval;
struct gfar_private *priv = netdev_priv(dev);
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- int oldsize = priv->rx_buffer_size;
int frame_size = new_mtu + ETH_HLEN;
if ((frame_size < 64) || (frame_size > JUMBO_FRAME_SIZE)) {
@@ -2451,45 +2375,33 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
}
- if (priv->uses_rxfcb)
- frame_size += GMAC_FCB_LEN;
-
- frame_size += priv->padding;
-
- tempsize = (frame_size & ~(INCREMENTAL_BUFFER_SIZE - 1)) +
- INCREMENTAL_BUFFER_SIZE;
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
- /* Only stop and start the controller if it isn't already
- * stopped, and we changed something
- */
- if ((oldsize != tempsize) && (dev->flags & IFF_UP))
+ if (dev->flags & IFF_UP)
stop_gfar(dev);
- priv->rx_buffer_size = tempsize;
-
dev->mtu = new_mtu;
- gfar_write(&regs->mrblr, priv->rx_buffer_size);
- gfar_write(&regs->maxfrm, priv->rx_buffer_size);
+ if (dev->flags & IFF_UP)
+ startup_gfar(dev);
- /* If the mtu is larger than the max size for standard
- * ethernet frames (ie, a jumbo frame), then set maccfg2
- * to allow huge frames, and to check the length
- */
- tempval = gfar_read(&regs->maccfg2);
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
- if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE ||
- gfar_has_errata(priv, GFAR_ERRATA_74))
- tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
- else
- tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
+ return 0;
+}
- gfar_write(&regs->maccfg2, tempval);
+void reset_gfar(struct net_device *ndev)
+{
+ struct gfar_private *priv = netdev_priv(ndev);
- if ((oldsize != tempsize) && (dev->flags & IFF_UP))
- startup_gfar(dev);
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
- return 0;
+ stop_gfar(ndev);
+ startup_gfar(ndev);
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
}
/* gfar_reset_task gets scheduled when a packet has not been
@@ -2501,16 +2413,7 @@ static void gfar_reset_task(struct work_struct *work)
{
struct gfar_private *priv = container_of(work, struct gfar_private,
reset_task);
- struct net_device *dev = priv->ndev;
-
- if (dev->flags & IFF_UP) {
- netif_tx_stop_all_queues(dev);
- stop_gfar(dev);
- startup_gfar(dev);
- netif_tx_start_all_queues(dev);
- }
-
- netif_tx_schedule_all(dev);
+ reset_gfar(priv->ndev);
}
static void gfar_timeout(struct net_device *dev)
@@ -2623,8 +2526,10 @@ static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
}
/* If we freed a buffer, we can restart transmission, if necessary */
- if (netif_tx_queue_stopped(txq) && tx_queue->num_txbdfree)
- netif_wake_subqueue(dev, tqi);
+ if (tx_queue->num_txbdfree &&
+ netif_tx_queue_stopped(txq) &&
+ !(test_bit(GFAR_DOWN, &priv->state)))
+ netif_wake_subqueue(priv->ndev, tqi);
/* Update dirty indicators */
tx_queue->skb_dirtytx = skb_dirtytx;
@@ -2907,17 +2812,6 @@ static int gfar_poll_sq(struct napi_struct *napi, int budget)
gfar_write(&regs->rstat, gfargrp->rstat);
gfar_write(&regs->imask, IMASK_DEFAULT);
-
- /* If we are coalescing interrupts, update the timer
- * Otherwise, clear it
- */
- gfar_write(&regs->txic, 0);
- if (likely(tx_queue->txcoalescing))
- gfar_write(&regs->txic, tx_queue->txic);
-
- gfar_write(&regs->rxic, 0);
- if (unlikely(rx_queue->rxcoalescing))
- gfar_write(&regs->rxic, rx_queue->rxic);
}
return work_done;
@@ -2987,12 +2881,6 @@ static int gfar_poll(struct napi_struct *napi, int budget)
gfar_write(&regs->rstat, gfargrp->rstat);
gfar_write(&regs->imask, IMASK_DEFAULT);
-
- /* If we are coalescing interrupts, update the timer
- * Otherwise, clear it
- */
- gfar_configure_coalescing(priv, gfargrp->rx_bit_map,
- gfargrp->tx_bit_map);
}
return work_done;
@@ -3101,12 +2989,11 @@ static void adjust_link(struct net_device *dev)
{
struct gfar_private *priv = netdev_priv(dev);
struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned long flags;
struct phy_device *phydev = priv->phydev;
int new_state = 0;
- local_irq_save(flags);
- lock_tx_qs(priv);
+ if (test_bit(GFAR_RESETTING, &priv->state))
+ return;
if (phydev->link) {
u32 tempval1 = gfar_read(&regs->maccfg1);
@@ -3178,8 +3065,6 @@ static void adjust_link(struct net_device *dev)
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
}
/* Update the hash table based on the current list of multicast
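The gianfar rework above replaces the old rx/tx queue spinlocks with a GFAR_RESETTING bit-lock on priv->state; every reconfiguration path (MTU, coalescing, ring sizes, features) now stops and restarts the device under that bit. A minimal sketch of the pattern, using a hypothetical helper name and assuming the stop_gfar()/startup_gfar() semantics introduced above:

    /* Sketch only, not part of the patch: the common reconfiguration pattern */
    static void gfar_reconfig_sketch(struct net_device *ndev)
    {
        struct gfar_private *priv = netdev_priv(ndev);

        /* serialize against the other reconfiguration paths */
        while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
            cpu_relax();

        if (ndev->flags & IFF_UP)
            stop_gfar(ndev);    /* sets GFAR_DOWN, halts DMA, frees skb resources */

        /* ... apply the new settings here ... */

        if (ndev->flags & IFF_UP)
            startup_gfar(ndev); /* clears GFAR_DOWN, restarts DMA and interrupts */

        clear_bit_unlock(GFAR_RESETTING, &priv->state);
    }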
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index 52bb2b0..1e16216 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -9,7 +9,7 @@
* Maintainer: Kumar Gala
* Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
*
- * Copyright 2002-2009, 2011 Freescale Semiconductor, Inc.
+ * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -880,7 +880,6 @@ struct gfar {
#define FSL_GIANFAR_DEV_HAS_CSUM 0x00000010
#define FSL_GIANFAR_DEV_HAS_VLAN 0x00000020
#define FSL_GIANFAR_DEV_HAS_EXTENDED_HASH 0x00000040
-#define FSL_GIANFAR_DEV_HAS_PADDING 0x00000080
#define FSL_GIANFAR_DEV_HAS_MAGIC_PACKET 0x00000100
#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
@@ -892,8 +891,8 @@ struct gfar {
#define DEFAULT_MAPPING 0xFF
#endif
-#define ISRG_SHIFT_TX 0x10
-#define ISRG_SHIFT_RX 0x18
+#define ISRG_RR0 0x80000000
+#define ISRG_TR0 0x00800000
/* The same driver can operate in two modes */
/* SQ_SG_MODE: Single Queue Single Group Mode
@@ -966,7 +965,6 @@ struct rx_q_stats {
/**
* struct gfar_priv_rx_q - per rx queue structure
- * @rxlock: per queue rx spin lock
* @rx_skbuff: skb pointers
* @skb_currx: currently use skb pointer
* @rx_bd_base: First rx buffer descriptor
@@ -979,8 +977,7 @@ struct rx_q_stats {
*/
struct gfar_priv_rx_q {
- spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
- struct sk_buff ** rx_skbuff;
+ struct sk_buff **rx_skbuff __aligned(SMP_CACHE_BYTES);
dma_addr_t rx_bd_dma_base;
struct rxbd8 *rx_bd_base;
struct rxbd8 *cur_rx;
@@ -1041,6 +1038,11 @@ enum gfar_errata {
GFAR_ERRATA_12 = 0x08, /* a.k.a errata eTSEC49 */
};
+enum gfar_dev_state {
+ GFAR_DOWN = 1,
+ GFAR_RESETTING
+};
+
/* Struct stolen almost completely (and shamelessly) from the FCC enet source
* (Ok, that's not so true anymore, but there is a family resemblance)
* The GFAR buffer descriptors track the ring buffers. The rx_bd_base
@@ -1069,6 +1071,7 @@ struct gfar_private {
struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
struct gfar_priv_grp gfargrp[MAXGROUPS];
+ unsigned long state;
u32 device_flags;
unsigned int mode;
@@ -1113,6 +1116,9 @@ struct gfar_private {
unsigned int total_tx_ring_size;
unsigned int total_rx_ring_size;
+ u32 rqueue;
+ u32 tqueue;
+
/* RX per device parameters */
unsigned int rx_stash_size;
unsigned int rx_stash_index;
@@ -1127,11 +1133,6 @@ struct gfar_private {
u32 __iomem *hash_regs[16];
int hash_width;
- /* global parameters */
- unsigned int fifo_threshold;
- unsigned int fifo_starve;
- unsigned int fifo_starve_off;
-
/*Filer table*/
unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
@@ -1176,21 +1177,42 @@ static inline void gfar_read_filer(struct gfar_private *priv,
*fpr = gfar_read(&regs->rqfpr);
}
-void lock_rx_qs(struct gfar_private *priv);
-void lock_tx_qs(struct gfar_private *priv);
-void unlock_rx_qs(struct gfar_private *priv);
-void unlock_tx_qs(struct gfar_private *priv);
+static inline void gfar_write_isrg(struct gfar_private *priv)
+{
+ struct gfar __iomem *regs = priv->gfargrp[0].regs;
+ u32 __iomem *baddr = &regs->isrg0;
+ u32 isrg = 0;
+ int grp_idx, i;
+
+ for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
+ struct gfar_priv_grp *grp = &priv->gfargrp[grp_idx];
+
+ for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
+ isrg |= (ISRG_RR0 >> i);
+ }
+
+ for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
+ isrg |= (ISRG_TR0 >> i);
+ }
+
+ gfar_write(baddr, isrg);
+
+ baddr++;
+ isrg = 0;
+ }
+}
+
irqreturn_t gfar_receive(int irq, void *dev_id);
int startup_gfar(struct net_device *dev);
void stop_gfar(struct net_device *dev);
-void gfar_halt(struct net_device *dev);
+void reset_gfar(struct net_device *dev);
+void gfar_mac_reset(struct gfar_private *priv);
+void gfar_halt(struct gfar_private *priv);
+void gfar_start(struct gfar_private *priv);
void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, int enable,
u32 regnum, u32 read);
void gfar_configure_coalescing_all(struct gfar_private *priv);
-void gfar_init_sysfs(struct net_device *dev);
int gfar_set_features(struct net_device *dev, netdev_features_t features);
-void gfar_check_rx_parser_mode(struct gfar_private *priv);
-void gfar_vlan_mode(struct net_device *dev, netdev_features_t features);
extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index 63d2344..891dbee 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -44,10 +44,6 @@
#include "gianfar.h"
-extern void gfar_start(struct net_device *dev);
-extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
- int rx_work_limit);
-
#define GFAR_MAX_COAL_USECS 0xffff
#define GFAR_MAX_COAL_FRAMES 0xff
static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
@@ -364,25 +360,11 @@ static int gfar_scoalesce(struct net_device *dev,
struct ethtool_coalesce *cvals)
{
struct gfar_private *priv = netdev_priv(dev);
- int i = 0;
+ int i, err = 0;
if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
return -EOPNOTSUPP;
- /* Set up rx coalescing */
- /* As of now, we will enable/disable coalescing for all
- * queues together in case of eTSEC2, this will be modified
- * along with the ethtool interface
- */
- if ((cvals->rx_coalesce_usecs == 0) ||
- (cvals->rx_max_coalesced_frames == 0)) {
- for (i = 0; i < priv->num_rx_queues; i++)
- priv->rx_queue[i]->rxcoalescing = 0;
- } else {
- for (i = 0; i < priv->num_rx_queues; i++)
- priv->rx_queue[i]->rxcoalescing = 1;
- }
-
if (NULL == priv->phydev)
return -ENODEV;
@@ -399,6 +381,32 @@ static int gfar_scoalesce(struct net_device *dev,
return -EINVAL;
}
+ /* Check the bounds of the values */
+ if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
+ netdev_info(dev, "Coalescing is limited to %d microseconds\n",
+ GFAR_MAX_COAL_USECS);
+ return -EINVAL;
+ }
+
+ if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
+ netdev_info(dev, "Coalescing is limited to %d frames\n",
+ GFAR_MAX_COAL_FRAMES);
+ return -EINVAL;
+ }
+
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
+
+ /* Set up rx coalescing */
+ if ((cvals->rx_coalesce_usecs == 0) ||
+ (cvals->rx_max_coalesced_frames == 0)) {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 0;
+ } else {
+ for (i = 0; i < priv->num_rx_queues; i++)
+ priv->rx_queue[i]->rxcoalescing = 1;
+ }
+
for (i = 0; i < priv->num_rx_queues; i++) {
priv->rx_queue[i]->rxic = mk_ic_value(
cvals->rx_max_coalesced_frames,
@@ -415,28 +423,22 @@ static int gfar_scoalesce(struct net_device *dev,
priv->tx_queue[i]->txcoalescing = 1;
}
- /* Check the bounds of the values */
- if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
- netdev_info(dev, "Coalescing is limited to %d microseconds\n",
- GFAR_MAX_COAL_USECS);
- return -EINVAL;
- }
-
- if (cvals->tx_max_coalesced_frames > GFAR_MAX_COAL_FRAMES) {
- netdev_info(dev, "Coalescing is limited to %d frames\n",
- GFAR_MAX_COAL_FRAMES);
- return -EINVAL;
- }
-
for (i = 0; i < priv->num_tx_queues; i++) {
priv->tx_queue[i]->txic = mk_ic_value(
cvals->tx_max_coalesced_frames,
gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
}
- gfar_configure_coalescing_all(priv);
+ if (dev->flags & IFF_UP) {
+ stop_gfar(dev);
+ err = startup_gfar(dev);
+ } else {
+ gfar_mac_reset(priv);
+ }
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
- return 0;
+ return err;
}
/* Fills in rvals with the current ring parameters. Currently,
@@ -467,15 +469,13 @@ static void gfar_gringparam(struct net_device *dev,
}
/* Change the current ring parameters, stopping the controller if
- * necessary so that we don't mess things up while we're in
- * motion. We wait for the ring to be clean before reallocating
- * the rings.
+ * necessary so that we don't mess things up while we're in motion.
*/
static int gfar_sringparam(struct net_device *dev,
struct ethtool_ringparam *rvals)
{
struct gfar_private *priv = netdev_priv(dev);
- int err = 0, i = 0;
+ int err = 0, i;
if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
return -EINVAL;
@@ -493,44 +493,25 @@ static int gfar_sringparam(struct net_device *dev,
return -EINVAL;
}
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
- if (dev->flags & IFF_UP) {
- unsigned long flags;
-
- /* Halt TX and RX, and process the frames which
- * have already been received
- */
- local_irq_save(flags);
- lock_tx_qs(priv);
- lock_rx_qs(priv);
-
- gfar_halt(dev);
-
- unlock_rx_qs(priv);
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- for (i = 0; i < priv->num_rx_queues; i++)
- gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
-
- /* Now we take down the rings to rebuild them */
+ if (dev->flags & IFF_UP)
stop_gfar(dev);
- }
- /* Change the size */
- for (i = 0; i < priv->num_rx_queues; i++) {
+ /* Change the sizes */
+ for (i = 0; i < priv->num_rx_queues; i++)
priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
+
+ for (i = 0; i < priv->num_tx_queues; i++)
priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
- priv->tx_queue[i]->num_txbdfree =
- priv->tx_queue[i]->tx_ring_size;
- }
/* Rebuild the rings with the new size */
- if (dev->flags & IFF_UP) {
+ if (dev->flags & IFF_UP)
err = startup_gfar(dev);
- netif_tx_wake_all_queues(dev);
- }
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
return err;
}
@@ -608,43 +589,29 @@ static int gfar_spauseparam(struct net_device *dev,
int gfar_set_features(struct net_device *dev, netdev_features_t features)
{
- struct gfar_private *priv = netdev_priv(dev);
- unsigned long flags;
- int err = 0, i = 0;
netdev_features_t changed = dev->features ^ features;
+ struct gfar_private *priv = netdev_priv(dev);
+ int err = 0;
- if (changed & (NETIF_F_HW_VLAN_CTAG_TX|NETIF_F_HW_VLAN_CTAG_RX))
- gfar_vlan_mode(dev, features);
-
- if (!(changed & NETIF_F_RXCSUM))
+ if (!(changed & (NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
+ NETIF_F_RXCSUM)))
return 0;
- if (dev->flags & IFF_UP) {
- /* Halt TX and RX, and process the frames which
- * have already been received
- */
- local_irq_save(flags);
- lock_tx_qs(priv);
- lock_rx_qs(priv);
-
- gfar_halt(dev);
+ while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
+ cpu_relax();
- unlock_tx_qs(priv);
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- for (i = 0; i < priv->num_rx_queues; i++)
- gfar_clean_rx_ring(priv->rx_queue[i],
- priv->rx_queue[i]->rx_ring_size);
+ dev->features = features;
+ if (dev->flags & IFF_UP) {
/* Now we take down the rings to rebuild them */
stop_gfar(dev);
-
- dev->features = features;
-
err = startup_gfar(dev);
- netif_tx_wake_all_queues(dev);
+ } else {
+ gfar_mac_reset(priv);
}
+
+ clear_bit_unlock(GFAR_RESETTING, &priv->state);
+
return err;
}
@@ -1610,9 +1577,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
if (tab->index > MAX_FILER_IDX - 1)
return -EBUSY;
- /* Avoid inconsistent filer table to be processed */
- lock_rx_qs(priv);
-
/* Fill regular entries */
for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].ctrl);
i++)
@@ -1625,8 +1589,6 @@ static int gfar_write_filer_table(struct gfar_private *priv,
*/
gfar_write_filer(priv, i, 0x20, 0x0);
- unlock_rx_qs(priv);
-
return 0;
}
@@ -1831,6 +1793,9 @@ static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
struct gfar_private *priv = netdev_priv(dev);
int ret = 0;
+ if (test_bit(GFAR_RESETTING, &priv->state))
+ return -EBUSY;
+
mutex_lock(&priv->rx_queue_access);
switch (cmd->cmd) {
diff --git a/drivers/net/ethernet/freescale/gianfar_sysfs.c b/drivers/net/ethernet/freescale/gianfar_sysfs.c
deleted file mode 100644
index e02dd13..0000000
--- a/drivers/net/ethernet/freescale/gianfar_sysfs.c
+++ /dev/null
@@ -1,340 +0,0 @@
-/*
- * drivers/net/ethernet/freescale/gianfar_sysfs.c
- *
- * Gianfar Ethernet Driver
- * This driver is designed for the non-CPM ethernet controllers
- * on the 85xx and 83xx family of integrated processors
- * Based on 8260_io/fcc_enet.c
- *
- * Author: Andy Fleming
- * Maintainer: Kumar Gala (galak@kernel.crashing.org)
- * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
- *
- * Copyright 2002-2009 Freescale Semiconductor, Inc.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * Sysfs file creation and management
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/errno.h>
-#include <linux/unistd.h>
-#include <linux/delay.h>
-#include <linux/etherdevice.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/device.h>
-
-#include <asm/uaccess.h>
-#include <linux/module.h>
-
-#include "gianfar.h"
-
-static ssize_t gfar_show_bd_stash(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%s\n", priv->bd_stash_en ? "on" : "off");
-}
-
-static ssize_t gfar_set_bd_stash(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- int new_setting = 0;
- u32 temp;
- unsigned long flags;
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
- return count;
-
-
- /* Find out the new setting */
- if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
- new_setting = 1;
- else if (!strncmp("off", buf, count - 1) ||
- !strncmp("0", buf, count - 1))
- new_setting = 0;
- else
- return count;
-
-
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- /* Set the new stashing value */
- priv->bd_stash_en = new_setting;
-
- temp = gfar_read(&regs->attr);
-
- if (new_setting)
- temp |= ATTR_BDSTASH;
- else
- temp &= ~(ATTR_BDSTASH);
-
- gfar_write(&regs->attr, temp);
-
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(bd_stash, 0644, gfar_show_bd_stash, gfar_set_bd_stash);
-
-static ssize_t gfar_show_rx_stash_size(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->rx_stash_size);
-}
-
-static ssize_t gfar_set_rx_stash_size(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int length = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
- return count;
-
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- if (length > priv->rx_buffer_size)
- goto out;
-
- if (length == priv->rx_stash_size)
- goto out;
-
- priv->rx_stash_size = length;
-
- temp = gfar_read(&regs->attreli);
- temp &= ~ATTRELI_EL_MASK;
- temp |= ATTRELI_EL(length);
- gfar_write(&regs->attreli, temp);
-
- /* Turn stashing on/off as appropriate */
- temp = gfar_read(&regs->attr);
-
- if (length)
- temp |= ATTR_BUFSTASH;
- else
- temp &= ~(ATTR_BUFSTASH);
-
- gfar_write(&regs->attr, temp);
-
-out:
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(rx_stash_size, 0644, gfar_show_rx_stash_size,
- gfar_set_rx_stash_size);
-
-/* Stashing will only be enabled when rx_stash_size != 0 */
-static ssize_t gfar_show_rx_stash_index(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->rx_stash_index);
-}
-
-static ssize_t gfar_set_rx_stash_index(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned short index = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
- return count;
-
- local_irq_save(flags);
- lock_rx_qs(priv);
-
- if (index > priv->rx_stash_size)
- goto out;
-
- if (index == priv->rx_stash_index)
- goto out;
-
- priv->rx_stash_index = index;
-
- temp = gfar_read(&regs->attreli);
- temp &= ~ATTRELI_EI_MASK;
- temp |= ATTRELI_EI(index);
- gfar_write(&regs->attreli, temp);
-
-out:
- unlock_rx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(rx_stash_index, 0644, gfar_show_rx_stash_index,
- gfar_set_rx_stash_index);
-
-static ssize_t gfar_show_fifo_threshold(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->fifo_threshold);
-}
-
-static ssize_t gfar_set_fifo_threshold(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int length = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (length > GFAR_MAX_FIFO_THRESHOLD)
- return count;
-
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- priv->fifo_threshold = length;
-
- temp = gfar_read(&regs->fifo_tx_thr);
- temp &= ~FIFO_TX_THR_MASK;
- temp |= length;
- gfar_write(&regs->fifo_tx_thr, temp);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(fifo_threshold, 0644, gfar_show_fifo_threshold,
- gfar_set_fifo_threshold);
-
-static ssize_t gfar_show_fifo_starve(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->fifo_starve);
-}
-
-static ssize_t gfar_set_fifo_starve(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int num = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (num > GFAR_MAX_FIFO_STARVE)
- return count;
-
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- priv->fifo_starve = num;
-
- temp = gfar_read(&regs->fifo_tx_starve);
- temp &= ~FIFO_TX_STARVE_MASK;
- temp |= num;
- gfar_write(&regs->fifo_tx_starve, temp);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(fifo_starve, 0644, gfar_show_fifo_starve,
- gfar_set_fifo_starve);
-
-static ssize_t gfar_show_fifo_starve_off(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
-
- return sprintf(buf, "%d\n", priv->fifo_starve_off);
-}
-
-static ssize_t gfar_set_fifo_starve_off(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct gfar_private *priv = netdev_priv(to_net_dev(dev));
- struct gfar __iomem *regs = priv->gfargrp[0].regs;
- unsigned int num = simple_strtoul(buf, NULL, 0);
- u32 temp;
- unsigned long flags;
-
- if (num > GFAR_MAX_FIFO_STARVE_OFF)
- return count;
-
- local_irq_save(flags);
- lock_tx_qs(priv);
-
- priv->fifo_starve_off = num;
-
- temp = gfar_read(&regs->fifo_tx_starve_shutoff);
- temp &= ~FIFO_TX_STARVE_OFF_MASK;
- temp |= num;
- gfar_write(&regs->fifo_tx_starve_shutoff, temp);
-
- unlock_tx_qs(priv);
- local_irq_restore(flags);
-
- return count;
-}
-
-static DEVICE_ATTR(fifo_starve_off, 0644, gfar_show_fifo_starve_off,
- gfar_set_fifo_starve_off);
-
-void gfar_init_sysfs(struct net_device *dev)
-{
- struct gfar_private *priv = netdev_priv(dev);
- int rc;
-
- /* Initialize the default values */
- priv->fifo_threshold = DEFAULT_FIFO_TX_THR;
- priv->fifo_starve = DEFAULT_FIFO_TX_STARVE;
- priv->fifo_starve_off = DEFAULT_FIFO_TX_STARVE_OFF;
-
- /* Create our sysfs files */
- rc = device_create_file(&dev->dev, &dev_attr_bd_stash);
- rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_size);
- rc |= device_create_file(&dev->dev, &dev_attr_rx_stash_index);
- rc |= device_create_file(&dev->dev, &dev_attr_fifo_threshold);
- rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve);
- rc |= device_create_file(&dev->dev, &dev_attr_fifo_starve_off);
- if (rc)
- dev_err(&dev->dev, "Error creating gianfar sysfs files\n");
-}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 6d91933..e6f8961d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -2038,13 +2038,16 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
msix_entry),
GFP_KERNEL);
if (adapter->msix_entries) {
+ struct e1000_adapter *a = adapter;
+
for (i = 0; i < adapter->num_vectors; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries,
- adapter->num_vectors);
- if (err == 0)
+ err = pci_enable_msix_range(a->pdev,
+ a->msix_entries,
+ a->num_vectors,
+ a->num_vectors);
+ if (err > 0)
return;
}
/* MSI-X failed, so fall through and try MSI */
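The e1000e hunk above and the i40e_main.c hunks further down move from pci_enable_msix() to pci_enable_msix_range(), which allocates anywhere between minvec and maxvec vectors in a single call or fails outright, so the old "retry with fewer vectors" loops go away. A minimal sketch of the new call pattern, with hypothetical names:

    /* Sketch only, not part of the patch: typical pci_enable_msix_range() usage */
    static int example_reserve_msix(struct pci_dev *pdev,
                                    struct msix_entry *entries, int want)
    {
        int got;

        /* returns the number of vectors actually allocated (here >= 1),
         * or a negative errno if even the minimum cannot be satisfied
         */
        got = pci_enable_msix_range(pdev, entries, 1, want);
        if (got < 0)
            return got;

        return got; /* caller may use entries[0 .. got-1] */
    }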
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
index a50e6b3..ed3902b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c
@@ -647,9 +647,8 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
desc_cb = *desc;
cb_func(hw, &desc_cb);
}
- memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
- memset((void *)details, 0,
- sizeof(struct i40e_asq_cmd_details));
+ memset(desc, 0, sizeof(*desc));
+ memset(details, 0, sizeof(*details));
ntc++;
if (ntc == asq->count)
ntc = 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index b901371..53f3ed2 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -38,7 +38,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 3
-#define DRV_VERSION_BUILD 30
+#define DRV_VERSION_BUILD 32
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -305,6 +305,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
break;
default:
netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
+ set_bit(__I40E_DOWN, &vsi->state);
i40e_down(vsi);
break;
}
@@ -3107,13 +3108,13 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- j = 1000;
- do {
- usleep_range(1000, 2000);
+ for (j = 0; j < 50; j++) {
tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
- } while (j-- && ((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT)
- ^ (tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT)) & 1);
-
+ if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
/* Skip if the queue is already in the requested state */
if (enable && (tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
continue;
@@ -3123,8 +3124,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
/* turn on/off the queue */
if (enable) {
wr32(hw, I40E_QTX_HEAD(pf_q), 0);
- tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK |
- I40E_QTX_ENA_QENA_STAT_MASK;
+ tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
} else {
tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
}
@@ -3171,12 +3171,13 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
- j = 1000;
- do {
- usleep_range(1000, 2000);
+ for (j = 0; j < 50; j++) {
rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
- } while (j-- && ((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT)
- ^ (rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT)) & 1);
+ if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
+ ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
+ break;
+ usleep_range(1000, 2000);
+ }
if (enable) {
/* is STAT set ? */
@@ -3190,11 +3191,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
/* turn on/off the queue */
if (enable)
- rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK |
- I40E_QRX_ENA_QENA_STAT_MASK;
+ rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
else
- rx_reg &= ~(I40E_QRX_ENA_QENA_REQ_MASK |
- I40E_QRX_ENA_QENA_STAT_MASK);
+ rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
/* wait for the change to finish */
@@ -5331,6 +5330,11 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
/* restart the VSIs that were rebuilt and running before the reset */
i40e_pf_unquiesce_all_vsi(pf);
+ if (pf->num_alloc_vfs) {
+ for (v = 0; v < pf->num_alloc_vfs; v++)
+ i40e_reset_vf(&pf->vf[v], true);
+ }
+
/* tell the firmware that we're starting */
dv.major_version = DRV_VERSION_MAJOR;
dv.minor_version = DRV_VERSION_MINOR;
@@ -5850,37 +5854,16 @@ err_out:
**/
static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
{
- int err = 0;
-
- pf->num_msix_entries = 0;
- while (vectors >= I40E_MIN_MSIX) {
- err = pci_enable_msix(pf->pdev, pf->msix_entries, vectors);
- if (err == 0) {
- /* good to go */
- pf->num_msix_entries = vectors;
- break;
- } else if (err < 0) {
- /* total failure */
- dev_info(&pf->pdev->dev,
- "MSI-X vector reservation failed: %d\n", err);
- vectors = 0;
- break;
- } else {
- /* err > 0 is the hint for retry */
- dev_info(&pf->pdev->dev,
- "MSI-X vectors wanted %d, retrying with %d\n",
- vectors, err);
- vectors = err;
- }
- }
-
- if (vectors > 0 && vectors < I40E_MIN_MSIX) {
+ vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
+ I40E_MIN_MSIX, vectors);
+ if (vectors < 0) {
dev_info(&pf->pdev->dev,
- "Couldn't get enough vectors, only %d available\n",
- vectors);
+ "MSI-X vector reservation failed: %d\n", vectors);
vectors = 0;
}
+ pf->num_msix_entries = vectors;
+
return vectors;
}
@@ -5942,7 +5925,7 @@ static int i40e_init_msix(struct i40e_pf *pf)
} else if (vec == I40E_MIN_MSIX) {
/* Adjust for minimal MSIX use */
- dev_info(&pf->pdev->dev, "Features disabled, not enough MSIX vectors\n");
+ dev_info(&pf->pdev->dev, "Features disabled, not enough MSI-X vectors\n");
pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
pf->num_vmdq_vsis = 0;
pf->num_vmdq_qps = 0;
@@ -6071,7 +6054,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
(pf->flags & I40E_FLAG_MSI_ENABLED)) {
- dev_info(&pf->pdev->dev, "MSIX not available, trying MSI\n");
+ dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
err = pci_enable_msi(pf->pdev);
if (err) {
dev_info(&pf->pdev->dev, "MSI init failed - %d\n", err);
@@ -6080,7 +6063,7 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
}
if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
- dev_info(&pf->pdev->dev, "MSIX and MSI not available, falling back to Legacy IRQ\n");
+ dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");
/* track first vector for misc interrupts */
err = i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT-1);
@@ -6107,7 +6090,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
i40e_intr, 0, pf->misc_int_name, pf);
if (err) {
dev_info(&pf->pdev->dev,
- "request_irq for msix_misc failed: %d\n", err);
+ "request_irq for %s failed: %d\n",
+ pf->misc_int_name, err);
return -EFAULT;
}
}
@@ -8070,6 +8054,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
i40e_flush(hw);
+
+ if (pci_num_vf(pdev)) {
+ dev_info(&pdev->dev,
+ "Active VFs found, allocating resources.\n");
+ err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
+ if (err)
+ dev_info(&pdev->dev,
+ "Error %d allocating resources for existing VFs\n",
+ err);
+ }
}
pfs_found++;
@@ -8165,16 +8159,16 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_ptp_stop(pf);
- if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
- i40e_free_vfs(pf);
- pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
- }
-
/* no more scheduling of any task */
set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task);
+ if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
+ i40e_free_vfs(pf);
+ pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
+ }
+
i40e_fdir_teardown(pf);
/* If there is a switch structure or any orphans, remove them.
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index d4bb482..19af4ce 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -892,7 +892,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* likely incorrect csum if alternate IP extention headers found */
+ /* likely incorrect csum if alternate IP extension headers found */
if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
return;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index b9d1c1c..189e250 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -408,18 +408,10 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
"Could not allocate VF broadcast filter\n");
}
- if (!f) {
- dev_err(&pf->pdev->dev, "Unable to add ucast filter\n");
- ret = -ENOMEM;
- goto error_alloc_vsi_res;
- }
-
/* program mac filter */
ret = i40e_sync_vsi_filters(vsi);
- if (ret) {
+ if (ret)
dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
- goto error_alloc_vsi_res;
- }
error_alloc_vsi_res:
return ret;
@@ -679,9 +671,9 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
complete_reset:
/* reallocate vf resources to reset the VSI state */
i40e_free_vf_res(vf);
- mdelay(10);
i40e_alloc_vf_res(vf);
i40e_enable_vf_mappings(vf);
+ set_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states);
/* tell the VF the reset is done */
wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_id), I40E_VFR_VFACTIVE);
@@ -847,7 +839,7 @@ void i40e_free_vfs(struct i40e_pf *pf)
*
* allocate vf resources
**/
-static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
{
struct i40e_vf *vfs;
int i, ret = 0;
@@ -855,14 +847,16 @@ static int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
/* Disable interrupt 0 so we don't try to handle the VFLR. */
i40e_irq_dynamic_disable_icr0(pf);
- ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "pci_enable_sriov failed with error %d!\n", ret);
- pf->num_alloc_vfs = 0;
- goto err_iov;
+ /* Check to see if we're just allocating resources for extant VFs */
+ if (pci_num_vf(pf->pdev) != num_alloc_vfs) {
+ ret = pci_enable_sriov(pf->pdev, num_alloc_vfs);
+ if (ret) {
+ dev_err(&pf->pdev->dev,
+ "Failed to enable SR-IOV, error %d.\n", ret);
+ pf->num_alloc_vfs = 0;
+ goto err_iov;
+ }
}
-
/* allocate memory */
vfs = kzalloc(num_alloc_vfs * sizeof(struct i40e_vf), GFP_KERNEL);
if (!vfs) {
@@ -1873,7 +1867,8 @@ int i40e_vc_process_vflr_event(struct i40e_pf *pf)
/* clear the bit in GLGEN_VFLRSTAT */
wr32(hw, I40E_GLGEN_VFLRSTAT(reg_idx), (1 << bit_idx));
- i40e_reset_vf(vf, true);
+ if (!test_bit(__I40E_DOWN, &pf->state))
+ i40e_reset_vf(vf, true);
}
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
index cc1feee..bedf0ba 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h
@@ -102,6 +102,7 @@ struct i40e_vf {
void i40e_free_vfs(struct i40e_pf *pf);
int i40e_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
u32 v_retval, u8 *msg, u16 msglen);
int i40e_vc_process_vflr_event(struct i40e_pf *pf);
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
index f7cea1b..97662b6 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_adminq_cmd.h
@@ -1229,7 +1229,7 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_NGE 2
#define I40E_AQC_ADD_CLOUD_TNL_TYPE_IP 3
- __le32 tenant_id ;
+ __le32 tenant_id;
u8 reserved[4];
__le16 queue_number;
#define I40E_AQC_ADD_CLOUD_QUEUE_SHIFT 0
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
index ffdb01d..827bb5f 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40evf/i40e_txrx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -722,7 +722,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
rx_status & (1 << I40E_RX_DESC_STATUS_L3L4P_SHIFT)))
return;
- /* likely incorrect csum if alternate IP extention headers found */
+ /* likely incorrect csum if alternate IP extension headers found */
if (rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
return;
@@ -807,8 +807,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
rx_desc = I40E_RX_DESC(rx_ring, i);
qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
- rx_status = (qword & I40E_RXD_QW1_STATUS_MASK)
- >> I40E_RXD_QW1_STATUS_SHIFT;
+ rx_status = (qword & I40E_RXD_QW1_STATUS_MASK) >>
+ I40E_RXD_QW1_STATUS_SHIFT;
while (rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT)) {
union i40e_rx_desc *next_rxd;
diff --git a/drivers/net/ethernet/intel/i40evf/i40e_type.h b/drivers/net/ethernet/intel/i40evf/i40e_type.h
index 3bffac0..092aace 100644
--- a/drivers/net/ethernet/intel/i40evf/i40e_type.h
+++ b/drivers/net/ethernet/intel/i40evf/i40e_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -64,8 +64,6 @@
struct i40e_hw;
typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
-#define ETH_ALEN 6
-
/* Data type manipulation macros. */
#define I40E_DESC_UNUSED(R) \
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf.h b/drivers/net/ethernet/intel/i40evf/i40evf.h
index ff6529b..ccb43d3 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf.h
+++ b/drivers/net/ethernet/intel/i40evf/i40evf.h
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -38,8 +38,6 @@
#include <linux/ipv6.h>
#include <net/ip6_checksum.h>
#include <net/udp.h>
-#include <linux/sctp.h>
-
#include "i40e_type.h"
#include "i40e_virtchnl.h"
@@ -164,15 +162,14 @@ struct i40evf_vlan_filter {
/* Driver state. The order of these is important! */
enum i40evf_state_t {
__I40EVF_STARTUP, /* driver loaded, probe complete */
- __I40EVF_FAILED, /* PF communication failed. Fatal. */
__I40EVF_REMOVE, /* driver is being unloaded */
__I40EVF_INIT_VERSION_CHECK, /* aq msg sent, awaiting reply */
__I40EVF_INIT_GET_RESOURCES, /* aq msg sent, awaiting reply */
__I40EVF_INIT_SW, /* got resources, setting up structs */
+ __I40EVF_RESETTING, /* in reset */
/* Below here, watchdog is running */
__I40EVF_DOWN, /* ready, can be opened */
__I40EVF_TESTING, /* in ethtool self-test */
- __I40EVF_RESETTING, /* in reset */
__I40EVF_RUNNING, /* opened, working */
};
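
Moving __I40EVF_RESETTING up, below the INIT states and above the "watchdog is running" marker, matters because the enum values are ordered and the driver gates work with range comparisons such as (adapter->state < __I40EVF_DOWN). A small standalone sketch of that pattern with demo names rather than the real enum:

#include <stdio.h>

/* Demo-only names; the ordering is the point: states at or above DEMO_DOWN
 * imply the watchdog should be running. */
enum demo_state {
        DEMO_STARTUP,
        DEMO_REMOVE,
        DEMO_INIT,
        DEMO_RESETTING,
        /* watchdog runs from here on */
        DEMO_DOWN,
        DEMO_TESTING,
        DEMO_RUNNING,
};

static int watchdog_should_run(enum demo_state s)
{
        return s >= DEMO_DOWN;
}

int main(void)
{
        printf("resetting: %d, running: %d\n",
               watchdog_should_run(DEMO_RESETTING),
               watchdog_should_run(DEMO_RUNNING));
        return 0;
}
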
@@ -185,47 +182,25 @@ enum i40evf_critical_section_t {
/* board specific private data structure */
struct i40evf_adapter {
struct timer_list watchdog_timer;
- struct vlan_group *vlgrp;
struct work_struct reset_task;
struct work_struct adminq_task;
struct delayed_work init_task;
struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
struct list_head vlan_filter_list;
- char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
-
- /* Interrupt Throttle Rate */
- u32 itr_setting;
- u16 eitr_low;
- u16 eitr_high;
+ char misc_vector_name[IFNAMSIZ + 9];
/* TX */
struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
- u64 restart_queue;
- u64 hw_csum_tx_good;
- u64 lsc_int;
- u64 hw_tso_ctxt;
- u64 hw_tso6_ctxt;
u32 tx_timeout_count;
struct list_head mac_filter_list;
-#ifdef DEBUG
- bool detect_tx_hung;
-#endif /* DEBUG */
/* RX */
struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
- int txd_count;
- int rxd_count;
u64 hw_csum_rx_error;
- u64 hw_rx_no_dma_resources;
- u64 hw_csum_rx_good;
- u64 non_eop_descs;
int num_msix_vectors;
struct msix_entry *msix_entries;
- u64 rx_hdr_split;
-
- u32 init_state;
- volatile unsigned long flags;
+ u32 flags;
#define I40EVF_FLAG_RX_CSUM_ENABLED (u32)(1)
#define I40EVF_FLAG_RX_1BUF_CAPABLE (u32)(1 << 1)
#define I40EVF_FLAG_RX_PS_CAPABLE (u32)(1 << 2)
@@ -234,6 +209,8 @@ struct i40evf_adapter {
#define I40EVF_FLAG_IMIR_ENABLED (u32)(1 << 5)
#define I40EVF_FLAG_MQ_CAPABLE (u32)(1 << 6)
#define I40EVF_FLAG_NEED_LINK_UPDATE (u32)(1 << 7)
+#define I40EVF_FLAG_PF_COMMS_FAILED (u32)(1 << 8)
+#define I40EVF_FLAG_RESET_PENDING (u32)(1 << 9)
/* duplicates for common code */
#define I40E_FLAG_FDIR_ATR_ENABLED 0
#define I40E_FLAG_DCB_ENABLED 0
@@ -251,21 +228,19 @@ struct i40evf_adapter {
#define I40EVF_FLAG_AQ_CONFIGURE_QUEUES (u32)(1 << 6)
#define I40EVF_FLAG_AQ_MAP_VECTORS (u32)(1 << 7)
#define I40EVF_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
+
/* OS defined structs */
struct net_device *netdev;
struct pci_dev *pdev;
struct net_device_stats net_stats;
- /* structs defined in i40e_vf.h */
- struct i40e_hw hw;
+ struct i40e_hw hw; /* defined in i40e_type.h */
enum i40evf_state_t state;
volatile unsigned long crit_section;
- u64 tx_busy;
struct work_struct watchdog_task;
bool netdev_registered;
- bool dev_closed;
bool link_up;
enum i40e_virtchnl_ops current_op;
struct i40e_virtchnl_vf_resource *vf_res; /* incl. all VSIs */
@@ -276,11 +251,6 @@ struct i40evf_adapter {
u32 aq_wait_count;
};
-struct i40evf_info {
- enum i40e_mac_type mac;
- unsigned int flags;
-};
-
/* needed by i40evf_ethtool.c */
extern char i40evf_driver_name[];
@@ -315,6 +285,7 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter);
void i40evf_del_vlans(struct i40evf_adapter *adapter);
void i40evf_set_promiscuous(struct i40evf_adapter *adapter, int flags);
void i40evf_request_stats(struct i40evf_adapter *adapter);
+void i40evf_request_reset(struct i40evf_adapter *adapter);
void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
enum i40e_virtchnl_ops v_opcode,
i40e_status v_retval, u8 *msg, u16 msglen);
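
The two new adapter flags replace the old __I40EVF_FAILED state and the ad-hoc reset bookkeeping. A standalone sketch of the test/set idiom the rest of the patch uses with them; the bit positions are copied from the header above, the helper itself is illustrative only:

#include <stdint.h>
#include <stdio.h>

#define DEMO_FLAG_PF_COMMS_FAILED (1u << 8)
#define DEMO_FLAG_RESET_PENDING   (1u << 9)

/* Request a reset at most once while one is outstanding, mirroring the
 * tx_timeout and virtchnl-completion paths in this patch. */
static void request_reset_once(uint32_t *flags)
{
        if (*flags & DEMO_FLAG_RESET_PENDING)
                return;
        *flags |= DEMO_FLAG_RESET_PENDING;
        printf("reset scheduled\n");
}

int main(void)
{
        uint32_t flags = 0;

        request_reset_once(&flags);
        request_reset_once(&flags);     /* no-op: already pending */
        flags &= ~DEMO_FLAG_RESET_PENDING;
        return 0;
}
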
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
index b0b1f4b..8b0db1c 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -241,6 +241,7 @@ static int i40evf_set_ringparam(struct net_device *netdev,
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
u32 new_rx_count, new_tx_count;
+ int i;
if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
return -EINVAL;
@@ -256,12 +257,14 @@ static int i40evf_set_ringparam(struct net_device *netdev,
new_rx_count = ALIGN(new_rx_count, I40EVF_REQ_DESCRIPTOR_MULTIPLE);
/* if nothing to do return success */
- if ((new_tx_count == adapter->txd_count) &&
- (new_rx_count == adapter->rxd_count))
+ if ((new_tx_count == adapter->tx_rings[0]->count) &&
+ (new_rx_count == adapter->rx_rings[0]->count))
return 0;
- adapter->txd_count = new_tx_count;
- adapter->rxd_count = new_rx_count;
+ for (i = 0; i < adapter->vsi_res->num_queue_pairs; i++) {
+ adapter->tx_rings[i]->count = new_tx_count;
+ adapter->rx_rings[i]->count = new_rx_count;
+ }
if (netif_running(netdev))
i40evf_reinit_locked(adapter);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_main.c b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
index f5caf44..b2c03bc 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_main.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -31,10 +31,10 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] =
"Intel(R) XL710 X710 Virtual Function Network Driver";
-#define DRV_VERSION "0.9.11"
+#define DRV_VERSION "0.9.13"
const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] =
- "Copyright (c) 2013 Intel Corporation.";
+ "Copyright (c) 2013 - 2014 Intel Corporation.";
/* i40evf_pci_tbl - PCI Device ID Table
*
@@ -167,9 +167,13 @@ static void i40evf_tx_timeout(struct net_device *netdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
adapter->tx_timeout_count++;
-
- /* Do the reset outside of interrupt context */
- schedule_work(&adapter->reset_task);
+ dev_info(&adapter->pdev->dev, "TX timeout detected.\n");
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+ dev_info(&adapter->pdev->dev, "Requesting reset from PF\n");
+ i40evf_request_reset(adapter);
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ schedule_work(&adapter->reset_task);
+ }
}
/**
@@ -211,6 +215,9 @@ static void i40evf_irq_disable(struct i40evf_adapter *adapter)
int i;
struct i40e_hw *hw = &adapter->hw;
+ if (!adapter->msix_entries)
+ return;
+
for (i = 1; i < adapter->num_msix_vectors; i++) {
wr32(hw, I40E_VFINT_DYN_CTLN1(i - 1), 0);
synchronize_irq(adapter->msix_entries[i].vector);
@@ -511,12 +518,14 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
int err;
- sprintf(adapter->name[0], "i40evf:mbx");
+ sprintf(adapter->misc_vector_name, "i40evf:mbx");
err = request_irq(adapter->msix_entries[0].vector,
- &i40evf_msix_aq, 0, adapter->name[0], netdev);
+ &i40evf_msix_aq, 0,
+ adapter->misc_vector_name, netdev);
if (err) {
dev_err(&adapter->pdev->dev,
- "request_irq for msix_aq failed: %d\n", err);
+ "request_irq for %s failed: %d\n",
+ adapter->misc_vector_name, err);
free_irq(adapter->msix_entries[0].vector, netdev);
}
return err;
@@ -963,16 +972,23 @@ void i40evf_down(struct i40evf_adapter *adapter)
struct net_device *netdev = adapter->netdev;
struct i40evf_mac_filter *f;
- /* remove all MAC filters from the VSI */
+ /* remove all MAC filters */
list_for_each_entry(f, &adapter->mac_filter_list, list) {
f->remove = true;
}
- adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
- /* disable receives */
- adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
- mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
- msleep(20);
-
+ /* remove all VLAN filters */
+ list_for_each_entry(f, &adapter->vlan_filter_list, list) {
+ f->remove = true;
+ }
+ if (!(adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) &&
+ adapter->state != __I40EVF_RESETTING) {
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+ adapter->aq_required |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+ /* disable receives */
+ adapter->aq_required |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
+ mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
+ msleep(20);
+ }
netif_tx_disable(netdev);
netif_tx_stop_all_queues(netdev);
@@ -1291,19 +1307,47 @@ static void i40evf_watchdog_task(struct work_struct *work)
watchdog_task);
struct i40e_hw *hw = &adapter->hw;
- if (adapter->state < __I40EVF_DOWN)
+ if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ goto restart_watchdog;
+
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+ dev_info(&adapter->pdev->dev, "Checking for redemption\n");
+ if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
+ /* A chance for redemption! */
+ dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
+ adapter->state = __I40EVF_STARTUP;
+ adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+ schedule_delayed_work(&adapter->init_task, 10);
+ clear_bit(__I40EVF_IN_CRITICAL_TASK,
+ &adapter->crit_section);
+ /* Don't reschedule the watchdog, since we've restarted
+ * the init task. When init_task contacts the PF and
+ * gets everything set up again, it'll restart the
+ * watchdog for us. Down, boy. Sit. Stay. Woof.
+ */
+ return;
+ }
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
+ }
- if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
+ if ((adapter->state < __I40EVF_DOWN) ||
+ (adapter->flags & I40EVF_FLAG_RESET_PENDING))
goto watchdog_done;
- /* check for unannounced reset */
- if ((adapter->state != __I40EVF_RESETTING) &&
+ /* check for reset */
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
(rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
adapter->state = __I40EVF_RESETTING;
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ dev_err(&adapter->pdev->dev, "Hardware reset detected.\n");
+ dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
schedule_work(&adapter->reset_task);
- dev_info(&adapter->pdev->dev, "%s: hardware reset detected\n",
- __func__);
+ adapter->aq_pending = 0;
+ adapter->aq_required = 0;
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
goto watchdog_done;
}
@@ -1358,13 +1402,15 @@ static void i40evf_watchdog_task(struct work_struct *work)
i40evf_irq_enable(adapter, true);
i40evf_fire_sw_int(adapter, 0xFF);
+
watchdog_done:
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+restart_watchdog:
if (adapter->aq_required)
mod_timer(&adapter->watchdog_timer,
jiffies + msecs_to_jiffies(20));
else
mod_timer(&adapter->watchdog_timer, jiffies + (HZ * 2));
- clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
schedule_work(&adapter->adminq_task);
}
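
The reworked watchdog takes the critical-section bit first and re-arms the timer on both the completed and the skipped path, which is what the new watchdog_done/restart_watchdog labels express. A compact standalone sketch of that shape, using C11 atomics in place of the kernel's test_and_set_bit()/clear_bit():

#include <stdatomic.h>
#include <stdio.h>

static atomic_flag demo_crit = ATOMIC_FLAG_INIT;

/* If another task already holds the critical section, skip the work but still
 * fall through to the re-arm step. */
static void demo_watchdog(void)
{
        if (atomic_flag_test_and_set(&demo_crit))
                goto restart_watchdog;

        /* ... check PF comms, look for an unannounced reset, update stats ... */

        atomic_flag_clear(&demo_crit);          /* watchdog_done */
restart_watchdog:
        printf("re-arm watchdog timer\n");
}

int main(void)
{
        demo_watchdog();
        return 0;
}
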
@@ -1411,6 +1457,8 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
i40e_flush(hw);
}
+#define I40EVF_RESET_WAIT_MS 100
+#define I40EVF_RESET_WAIT_COUNT 200
/**
* i40evf_reset_task - Call-back task to handle hardware reset
* @work: pointer to work_struct
@@ -1421,8 +1469,9 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
**/
static void i40evf_reset_task(struct work_struct *work)
{
- struct i40evf_adapter *adapter =
- container_of(work, struct i40evf_adapter, reset_task);
+ struct i40evf_adapter *adapter = container_of(work,
+ struct i40evf_adapter,
+ reset_task);
struct i40e_hw *hw = &adapter->hw;
int i = 0, err;
uint32_t rstat_val;
@@ -1430,22 +1479,56 @@ static void i40evf_reset_task(struct work_struct *work)
while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
&adapter->crit_section))
udelay(500);
+ /* poll until we see the reset actually happen */
+ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
+ rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+ I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+ if (rstat_val != I40E_VFR_VFACTIVE) {
+ dev_info(&adapter->pdev->dev, "Reset now occurring\n");
+ break;
+ } else {
+ msleep(I40EVF_RESET_WAIT_MS);
+ }
+ }
+ if (i == I40EVF_RESET_WAIT_COUNT) {
+ dev_err(&adapter->pdev->dev, "Reset was not detected\n");
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+ goto continue_reset; /* act like the reset happened */
+ }
- /* wait until the reset is complete */
- for (i = 0; i < 20; i++) {
+ /* wait until the reset is complete and the PF is responding to us */
+ for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
- if (rstat_val == I40E_VFR_COMPLETED)
+ if (rstat_val == I40E_VFR_VFACTIVE) {
+ dev_info(&adapter->pdev->dev, "Reset is complete. Reinitializing.\n");
break;
- else
- mdelay(100);
+ } else {
+ msleep(I40EVF_RESET_WAIT_MS);
+ }
}
- if (i == 20) {
+ if (i == I40EVF_RESET_WAIT_COUNT) {
/* reset never finished */
- dev_info(&adapter->pdev->dev, "%s: reset never finished: %x\n",
- __func__, rstat_val);
- /* carry on anyway */
+ dev_err(&adapter->pdev->dev, "Reset never finished (%x). PF driver is dead, and so am I.\n",
+ rstat_val);
+ adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
+
+ if (netif_running(adapter->netdev))
+ i40evf_close(adapter->netdev);
+
+ i40evf_free_misc_irq(adapter);
+ i40evf_reset_interrupt_capability(adapter);
+ i40evf_free_queues(adapter);
+ kfree(adapter->vf_res);
+ i40evf_shutdown_adminq(hw);
+ adapter->netdev->flags &= ~IFF_UP;
+ clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
+ return; /* Do not attempt to reinit. It's dead, Jim. */
}
+
+continue_reset:
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
+
i40evf_down(adapter);
adapter->state = __I40EVF_RESETTING;
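
The reset task now polls in two bounded phases: first for the device to drop out of the active state (the reset has actually started), then for it to come back (the reset finished), and it only gives up for good if the second wait times out. A standalone sketch of that shape with the register read stubbed out; the delay between polls is omitted:

#include <stdbool.h>
#include <stdio.h>

#define DEMO_WAIT_COUNT 200

typedef unsigned int (*read_state_fn)(void);    /* stands in for rd32() */

static bool wait_for_reset_cycle(read_state_fn read_state, unsigned int active)
{
        int i;

        for (i = 0; i < DEMO_WAIT_COUNT; i++) {
                if (read_state() != active)
                        break;                  /* reset now occurring */
        }
        /* If that timed out, carry on and treat the reset as having happened,
         * as the driver does via its continue_reset label. */

        for (i = 0; i < DEMO_WAIT_COUNT; i++) {
                if (read_state() == active)
                        return true;            /* reset complete, reinit */
        }
        return false;                           /* PF never came back */
}

static unsigned int fake_state(void)
{
        static int calls;

        return (++calls < 3) ? 0u : 2u;         /* busy twice, then active */
}

int main(void)
{
        printf("reset cycle completed: %s\n",
               wait_for_reset_cycle(fake_state, 2) ? "yes" : "no");
        return 0;
}
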
@@ -1505,6 +1588,9 @@ static void i40evf_adminq_task(struct work_struct *work)
i40e_status ret;
u16 pending;
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+ return;
+
event.msg_size = I40EVF_MAX_AQ_BUF_SIZE;
event.msg_buf = kzalloc(event.msg_size, GFP_KERNEL);
if (!event.msg_buf) {
@@ -1636,6 +1722,10 @@ static int i40evf_open(struct net_device *netdev)
struct i40evf_adapter *adapter = netdev_priv(netdev);
int err;
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
+ dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
+ return -EIO;
+ }
if (adapter->state != __I40EVF_DOWN)
return -EBUSY;
@@ -1690,8 +1780,12 @@ static int i40evf_close(struct net_device *netdev)
{
struct i40evf_adapter *adapter = netdev_priv(netdev);
+ if (adapter->state <= __I40EVF_DOWN)
+ return 0;
+
/* signal that we are down to the interrupt handler */
adapter->state = __I40EVF_DOWN;
+
set_bit(__I40E_DOWN, &adapter->vsi.state);
i40evf_down(adapter);
@@ -1842,16 +1936,18 @@ static void i40evf_init_task(struct work_struct *work)
switch (adapter->state) {
case __I40EVF_STARTUP:
/* driver loaded, probe complete */
+ adapter->flags &= ~I40EVF_FLAG_PF_COMMS_FAILED;
+ adapter->flags &= ~I40EVF_FLAG_RESET_PENDING;
err = i40e_set_mac_type(hw);
if (err) {
- dev_info(&pdev->dev, "%s: set_mac_type failed: %d\n",
- __func__, err);
+ dev_err(&pdev->dev, "Failed to set MAC type (%d)\n",
+ err);
goto err;
}
err = i40evf_check_reset_complete(hw);
if (err) {
- dev_info(&pdev->dev, "%s: device is still in reset (%d).\n",
- __func__, err);
+ dev_err(&pdev->dev, "Device is still in reset (%d)\n",
+ err);
goto err;
}
hw->aq.num_arq_entries = I40EVF_AQ_LEN;
@@ -1861,14 +1957,14 @@ static void i40evf_init_task(struct work_struct *work)
err = i40evf_init_adminq(hw);
if (err) {
- dev_info(&pdev->dev, "%s: init_adminq failed: %d\n",
- __func__, err);
+ dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n",
+ err);
goto err;
}
err = i40evf_send_api_ver(adapter);
if (err) {
- dev_info(&pdev->dev, "%s: unable to send to PF (%d)\n",
- __func__, err);
+ dev_err(&pdev->dev, "Unable to send to PF (%d)\n",
+ err);
i40evf_shutdown_adminq(hw);
goto err;
}
@@ -1882,13 +1978,13 @@ static void i40evf_init_task(struct work_struct *work)
/* aq msg sent, awaiting reply */
err = i40evf_verify_api_ver(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable to verify API version, error %d\n",
+ dev_err(&pdev->dev, "Unable to verify API version (%d)\n",
err);
goto err;
}
err = i40evf_send_vf_config_msg(adapter);
if (err) {
- dev_err(&pdev->dev, "Unable send config request, error %d\n",
+ dev_err(&pdev->dev, "Unable send config request (%d)\n",
err);
goto err;
}
@@ -1902,18 +1998,15 @@ static void i40evf_init_task(struct work_struct *work)
(I40E_MAX_VF_VSI *
sizeof(struct i40e_virtchnl_vsi_resource));
adapter->vf_res = kzalloc(bufsz, GFP_KERNEL);
- if (!adapter->vf_res) {
- dev_err(&pdev->dev, "%s: unable to allocate memory\n",
- __func__);
+ if (!adapter->vf_res)
goto err;
- }
}
err = i40evf_get_vf_config(adapter);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
goto restart;
if (err) {
- dev_info(&pdev->dev, "%s: unable to get VF config (%d)\n",
- __func__, err);
+ dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
+ err);
goto err_alloc;
}
adapter->state = __I40EVF_INIT_SW;
@@ -1927,20 +2020,17 @@ static void i40evf_init_task(struct work_struct *work)
adapter->vsi_res = &adapter->vf_res->vsi_res[i];
}
if (!adapter->vsi_res) {
- dev_info(&pdev->dev, "%s: no LAN VSI found\n", __func__);
+ dev_err(&pdev->dev, "No LAN VSI found\n");
goto err_alloc;
}
adapter->flags |= I40EVF_FLAG_RX_CSUM_ENABLED;
- adapter->txd_count = I40EVF_DEFAULT_TXD;
- adapter->rxd_count = I40EVF_DEFAULT_RXD;
-
netdev->netdev_ops = &i40evf_netdev_ops;
i40evf_set_ethtool_ops(netdev);
netdev->watchdog_timeo = 5 * HZ;
-
- netdev->features |= NETIF_F_SG |
+ netdev->features |= NETIF_F_HIGHDMA |
+ NETIF_F_SG |
NETIF_F_IP_CSUM |
NETIF_F_SCTP_CSUM |
NETIF_F_IPV6_CSUM |
@@ -1956,11 +2046,9 @@ static void i40evf_init_task(struct work_struct *work)
NETIF_F_HW_VLAN_CTAG_FILTER;
}
- /* The HW MAC address was set and/or determined in sw_init */
if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
- dev_info(&pdev->dev,
- "Invalid MAC address %pMAC, using random\n",
- adapter->hw.mac.addr);
+ dev_info(&pdev->dev, "Invalid MAC address %pMAC, using random\n",
+ adapter->hw.mac.addr);
random_ether_addr(adapter->hw.mac.addr);
}
memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
@@ -1994,8 +2082,6 @@ static void i40evf_init_task(struct work_struct *work)
netif_carrier_off(netdev);
- strcpy(netdev->name, "eth%d");
-
adapter->vsi.id = adapter->vsi_res->vsi_id;
adapter->vsi.seid = adapter->vsi_res->vsi_id; /* dummy */
adapter->vsi.back = adapter;
@@ -2005,9 +2091,11 @@ static void i40evf_init_task(struct work_struct *work)
adapter->vsi.tx_itr_setting = I40E_ITR_DYNAMIC;
adapter->vsi.netdev = adapter->netdev;
- err = register_netdev(netdev);
- if (err)
- goto err_register;
+ if (!adapter->netdev_registered) {
+ err = register_netdev(netdev);
+ if (err)
+ goto err_register;
+ }
adapter->netdev_registered = true;
@@ -2031,17 +2119,16 @@ err_register:
i40evf_free_misc_irq(adapter);
err_sw_init:
i40evf_reset_interrupt_capability(adapter);
- adapter->state = __I40EVF_FAILED;
err_alloc:
kfree(adapter->vf_res);
adapter->vf_res = NULL;
err:
+ if (hw->aq.asq.count)
+ i40evf_shutdown_adminq(hw); /* ignore error */
/* Things went into the weeds, so try again later */
if (++adapter->aq_wait_count > I40EVF_AQ_MAX_ERR) {
dev_err(&pdev->dev, "Failed to communicate with PF; giving up.\n");
- if (hw->aq.asq.count)
- i40evf_shutdown_adminq(hw); /* ignore error */
- adapter->state = __I40EVF_FAILED;
+ adapter->flags |= I40EVF_FLAG_PF_COMMS_FAILED;
return; /* do not reschedule */
}
schedule_delayed_work(&adapter->init_task, HZ * 3);
@@ -2084,20 +2171,18 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct net_device *netdev;
struct i40evf_adapter *adapter = NULL;
struct i40e_hw *hw = NULL;
- int err, pci_using_dac;
+ int err;
err = pci_enable_device(pdev);
if (err)
return err;
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
- pci_using_dac = true;
/* coherent mask for the same size will always succeed if
* dma_set_mask does
*/
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
} else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
- pci_using_dac = false;
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
} else {
dev_err(&pdev->dev, "%s: DMA configuration failed: %d\n",
@@ -2128,8 +2213,6 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, netdev);
adapter = netdev_priv(netdev);
- if (pci_using_dac)
- netdev->features |= NETIF_F_HIGHDMA;
adapter->netdev = netdev;
adapter->pdev = pdev;
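
With pci_using_dac gone, NETIF_F_HIGHDMA is now set unconditionally during init (see the feature-flag hunk earlier). For reference, the same 64-bit-then-32-bit fallback can be written with the combined helper below; err_dma is an assumed label for the surrounding error path, which this hunk does not show:

/* Kernel-context sketch: dma_set_mask_and_coherent() sets the streaming and
 * coherent masks together and returns 0 on success. */
if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) {
        dev_err(&pdev->dev, "DMA configuration failed\n");
        err = -EIO;
        goto err_dma;
}
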
@@ -2271,6 +2354,7 @@ static void i40evf_remove(struct pci_dev *pdev)
struct i40e_hw *hw = &adapter->hw;
cancel_delayed_work_sync(&adapter->init_task);
+ cancel_work_sync(&adapter->reset_task);
if (adapter->netdev_registered) {
unregister_netdev(netdev);
@@ -2278,17 +2362,15 @@ static void i40evf_remove(struct pci_dev *pdev)
}
adapter->state = __I40EVF_REMOVE;
- if (adapter->num_msix_vectors) {
+ if (adapter->msix_entries) {
i40evf_misc_irq_disable(adapter);
- del_timer_sync(&adapter->watchdog_timer);
-
- flush_scheduled_work();
-
i40evf_free_misc_irq(adapter);
-
i40evf_reset_interrupt_capability(adapter);
}
+ del_timer_sync(&adapter->watchdog_timer);
+ flush_scheduled_work();
+
if (hw->aq.asq.count)
i40evf_shutdown_adminq(hw);
diff --git a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
index e6978d7..e294f01 100644
--- a/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
+++ b/drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
@@ -1,7 +1,7 @@
/*******************************************************************************
*
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2013 - 2014 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -43,6 +43,9 @@ static int i40evf_send_pf_msg(struct i40evf_adapter *adapter,
struct i40e_hw *hw = &adapter->hw;
i40e_status err;
+ if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED)
+ return 0; /* nothing to see here, move along */
+
err = i40e_aq_send_msg_to_pf(hw, op, 0, msg, len, NULL);
if (err)
dev_err(&adapter->pdev->dev, "Unable to send opcode %d to PF, error %d, aq status %d\n",
@@ -651,6 +654,18 @@ void i40evf_request_stats(struct i40evf_adapter *adapter)
/* if the request failed, don't lock out others */
adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
}
+/**
+ * i40evf_request_reset
+ * @adapter: adapter structure
+ *
+ * Request that the PF reset this VF. No response is expected.
+ **/
+void i40evf_request_reset(struct i40evf_adapter *adapter)
+{
+ /* Don't check CURRENT_OP - this is always higher priority */
+ i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_RESET_VF, NULL, 0);
+ adapter->current_op = I40E_VIRTCHNL_OP_UNKNOWN;
+}
/**
* i40evf_virtchnl_completion
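
i40evf_request_reset() is fire-and-forget: no reply is expected, so current_op is cleared straight away. For a typical caller see the tx_timeout hunk earlier in this patch; the pattern in isolation (kernel-context sketch, locking and work-scheduling details omitted):

if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
        i40evf_request_reset(adapter);          /* ask the PF to reset us */
        adapter->flags |= I40EVF_FLAG_RESET_PENDING;
        schedule_work(&adapter->reset_task);
}
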
@@ -689,10 +704,12 @@ void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
}
break;
case I40E_VIRTCHNL_EVENT_RESET_IMPENDING:
- adapter->state = __I40EVF_RESETTING;
- schedule_work(&adapter->reset_task);
- dev_info(&adapter->pdev->dev,
- "%s: hardware reset pending\n", __func__);
+ dev_info(&adapter->pdev->dev, "PF reset warning received\n");
+ if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING)) {
+ adapter->flags |= I40EVF_FLAG_RESET_PENDING;
+ dev_info(&adapter->pdev->dev, "Scheduling reset task\n");
+ schedule_work(&adapter->reset_task);
+ }
break;
default:
dev_err(&adapter->pdev->dev,
diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile
index f19700e..5bcb2de 100644
--- a/drivers/net/ethernet/intel/igb/Makefile
+++ b/drivers/net/ethernet/intel/igb/Makefile
@@ -1,7 +1,7 @@
################################################################################
#
# Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2013 Intel Corporation.
+# Copyright(c) 1999 - 2014 Intel Corporation.
#
# This program is free software; you can redistribute it and/or modify it
# under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
# more details.
#
# You should have received a copy of the GNU General Public License along with
-# this program; if not, write to the Free Software Foundation, Inc.,
-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+# this program; if not, see <http://www.gnu.org/licenses/>.
#
# The full GNU General Public License is included in this distribution in
# the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 06df692..45947b3 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -2720,7 +2719,7 @@ static const u8 e1000_emc_therm_limit[4] = {
*
* Updates the temperatures in mac.thermal_sensor_data
**/
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
{
s32 status = E1000_SUCCESS;
u16 ets_offset;
@@ -2774,7 +2773,7 @@ s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
* Sets the thermal sensor thresholds according to the NVM map
* and save off the threshold and location values into mac.thermal_sensor_data
**/
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
{
s32 status = E1000_SUCCESS;
u16 ets_offset;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.h b/drivers/net/ethernet/intel/igb/e1000_82575.h
index 8c24377..f12b086 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.h
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -266,8 +265,6 @@ u16 igb_rxpbs_adjust_82580(u32 data);
s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
s32 igb_set_eee_i350(struct e1000_hw *);
s32 igb_set_eee_i354(struct e1000_hw *);
-s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *);
-s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw);
#define E1000_I2C_THERMAL_SENSOR_ADDR 0xF8
#define E1000_EMC_INTERNAL_DATA 0x00
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 0571b97..393c896 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index ab99e2b..10741d1 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 0c03933..db96339 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -35,6 +34,8 @@
#include "e1000_hw.h"
#include "e1000_i210.h"
+static s32 igb_update_flash_i210(struct e1000_hw *hw);
+
/**
* igb_get_hw_semaphore_i210 - Acquire hardware semaphore
* @hw: pointer to the HW structure
@@ -111,7 +112,7 @@ static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
* Return successful if access grant bit set, else clear the request for
* EEPROM access and return -E1000_ERR_NVM (-1).
**/
-s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
+static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
{
return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
@@ -123,7 +124,7 @@ s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
* Stop any current commands to the EEPROM and clear the EEPROM request bit,
* then release the semaphores acquired.
**/
-void igb_release_nvm_i210(struct e1000_hw *hw)
+static void igb_release_nvm_i210(struct e1000_hw *hw)
{
igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
}
@@ -206,8 +207,8 @@ void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
* Reads a 16 bit word from the Shadow Ram using the EERD register.
* Uses necessary synchronization semaphores.
**/
-s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
s32 status = E1000_SUCCESS;
u16 i, count;
@@ -306,8 +307,8 @@ out:
* If error code is returned, data and Shadow RAM may be inconsistent - buffer
* partially written.
**/
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data)
+static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+ u16 *data)
{
s32 status = E1000_SUCCESS;
u16 i, count;
@@ -555,7 +556,7 @@ s32 igb_read_invm_version(struct e1000_hw *hw,
* Calculates the EEPROM checksum by reading/adding each word of the EEPROM
* and then verifies that the sum of the EEPROM is equal to 0xBABA.
**/
-s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
+static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
{
s32 status = E1000_SUCCESS;
s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
@@ -590,7 +591,7 @@ s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
* up to the checksum. Then calculates the EEPROM checksum and writes the
* value to the EEPROM. Next commit EEPROM data onto the Flash.
**/
-s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
+static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u16 checksum = 0;
@@ -684,7 +685,7 @@ bool igb_get_flash_presence_i210(struct e1000_hw *hw)
* @hw: pointer to the HW structure
*
**/
-s32 igb_update_flash_i210(struct e1000_hw *hw)
+static s32 igb_update_flash_i210(struct e1000_hw *hw)
{
s32 ret_val = E1000_SUCCESS;
u32 flup;
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 2d91371..907fe99 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -28,17 +27,8 @@
#ifndef _E1000_I210_H_
#define _E1000_I210_H_
-s32 igb_update_flash_i210(struct e1000_hw *hw);
-s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw);
-s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw);
-s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
-s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
- u16 *data);
s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
-s32 igb_acquire_nvm_i210(struct e1000_hw *hw);
-void igb_release_nvm_i210(struct e1000_hw *hw);
s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
s32 igb_read_invm_version(struct e1000_hw *hw,
struct e1000_fw_version *invm_ver);
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c
index 298f0ed..5910a93 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.h b/drivers/net/ethernet/intel/igb/e1000_mac.h
index e4cbe8e..99299ba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mac.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mac.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.c b/drivers/net/ethernet/intel/igb/e1000_mbx.c
index dac1447..d5b1217 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.c
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_mbx.h b/drivers/net/ethernet/intel/igb/e1000_mbx.h
index de9bba4..f52f551 100644
--- a/drivers/net/ethernet/intel/igb/e1000_mbx.h
+++ b/drivers/net/ethernet/intel/igb/e1000_mbx.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.c b/drivers/net/ethernet/intel/igb/e1000_nvm.c
index a7db7f3..9abf829 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.c
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_nvm.h b/drivers/net/ethernet/intel/igb/e1000_nvm.h
index 433b741..5b10117 100644
--- a/drivers/net/ethernet/intel/igb/e1000_nvm.h
+++ b/drivers/net/ethernet/intel/igb/e1000_nvm.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index ad2b74d..4009bba 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -394,77 +393,6 @@ s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
}
/**
- * e1000_write_sfp_data_byte - Writes SFP module data.
- * @hw: pointer to the HW structure
- * @offset: byte location offset to write to
- * @data: data to write
- *
- * Writes one byte to SFP module data stored
- * in SFP resided EEPROM memory or SFP diagnostic area.
- * Function should be called with
- * E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database access
- * E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP diagnostics parameters
- * access
- **/
-s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data)
-{
- u32 i = 0;
- u32 i2ccmd = 0;
- u32 data_local = 0;
-
- if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
- hw_dbg("I2CCMD command address exceeds upper limit\n");
- return -E1000_ERR_PHY;
- }
- /* The programming interface is 16 bits wide
- * so we need to read the whole word first
- * then update appropriate byte lane and write
- * the updated word back.
- */
- /* Set up Op-code, EEPROM Address,in the I2CCMD
- * register. The MAC will take care of interfacing
- * with an EEPROM to write the data given.
- */
- i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_READ);
- /* Set a command to read single word */
- wr32(E1000_I2CCMD, i2ccmd);
- for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
- udelay(50);
- /* Poll the ready bit to see if lastly
- * launched I2C operation completed
- */
- i2ccmd = rd32(E1000_I2CCMD);
- if (i2ccmd & E1000_I2CCMD_READY) {
- /* Check if this is READ or WRITE phase */
- if ((i2ccmd & E1000_I2CCMD_OPCODE_READ) ==
- E1000_I2CCMD_OPCODE_READ) {
- /* Write the selected byte
- * lane and update whole word
- */
- data_local = i2ccmd & 0xFF00;
- data_local |= data;
- i2ccmd = ((offset <<
- E1000_I2CCMD_REG_ADDR_SHIFT) |
- E1000_I2CCMD_OPCODE_WRITE | data_local);
- wr32(E1000_I2CCMD, i2ccmd);
- } else {
- break;
- }
- }
- }
- if (!(i2ccmd & E1000_I2CCMD_READY)) {
- hw_dbg("I2CCMD Write did not complete\n");
- return -E1000_ERR_PHY;
- }
- if (i2ccmd & E1000_I2CCMD_ERROR) {
- hw_dbg("I2CCMD Error bit set\n");
- return -E1000_ERR_PHY;
- }
- return 0;
-}
-
-/**
* igb_read_phy_reg_igp - Read igp PHY register
* @hw: pointer to the HW structure
* @offset: register offset to be read
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.h b/drivers/net/ethernet/intel/igb/e1000_phy.h
index 6a0873f..4c2c36c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.h
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -70,7 +69,6 @@ s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
-s32 e1000_write_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 data);
s32 igb_copper_link_setup_82580(struct e1000_hw *hw);
s32 igb_get_phy_info_82580(struct e1000_hw *hw);
s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 82632c6..abdd935 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h
index ccf472f..fc3fc2c 100644
--- a/drivers/net/ethernet/intel/igb/igb.h
+++ b/drivers/net/ethernet/intel/igb/igb.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -525,9 +524,7 @@ void igb_set_fw_version(struct igb_adapter *);
void igb_ptp_init(struct igb_adapter *adapter);
void igb_ptp_stop(struct igb_adapter *adapter);
void igb_ptp_reset(struct igb_adapter *adapter);
-void igb_ptp_tx_work(struct work_struct *work);
void igb_ptp_rx_hang(struct igb_adapter *adapter);
-void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb);
void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
struct sk_buff *skb);
diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c
index 1df0237..c7f5741 100644
--- a/drivers/net/ethernet/intel/igb/igb_ethtool.c
+++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/igb_hwmon.c b/drivers/net/ethernet/intel/igb/igb_hwmon.c
index e0af5bc..8333f67 100644
--- a/drivers/net/ethernet/intel/igb/igb_hwmon.c
+++ b/drivers/net/ethernet/intel/igb/igb_hwmon.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 46d31a4..3384156 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel(R) Gigabit Ethernet Linux driver
- Copyright(c) 2007-2013 Intel Corporation.
+ Copyright(c) 2007-2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -13,8 +13,7 @@
more details.
You should have received a copy of the GNU General Public License along with
- this program; if not, write to the Free Software Foundation, Inc.,
- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ this program; if not, see <http://www.gnu.org/licenses/>.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
@@ -70,7 +69,7 @@ char igb_driver_version[] = DRV_VERSION;
static const char igb_driver_string[] =
"Intel(R) Gigabit Ethernet Network Driver";
static const char igb_copyright[] =
- "Copyright (c) 2007-2013 Intel Corporation.";
+ "Copyright (c) 2007-2014 Intel Corporation.";
static const struct e1000_info *igb_info_tbl[] = {
[board_82575] = &e1000_82575_info,
@@ -1111,10 +1110,11 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
for (i = 0; i < numvecs; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries,
- numvecs);
- if (err == 0)
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries,
+ numvecs,
+ numvecs);
+ if (err > 0)
return;
igb_reset_interrupt_capability(adapter);
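
pci_enable_msix_range() takes a [minvec, maxvec] window and, unlike the old pci_enable_msix(), returns the number of vectors actually allocated on success and a negative errno on failure, which is why the success test changes from == 0 to > 0. A kernel-context sketch of the contract with minvec == maxvec, as used here:

int nvec;

/* All-or-nothing: either exactly numvecs vectors are granted or a negative
 * errno comes back; partial allocation is ruled out by minvec == maxvec. */
nvec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
                             numvecs, numvecs);
if (nvec < 0) {
        /* fall back to MSI / legacy interrupts */
} else {
        /* nvec == numvecs here */
}
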
diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c
index 5a54e3d..9c9c141 100644
--- a/drivers/net/ethernet/intel/igb/igb_ptp.c
+++ b/drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -12,9 +12,8 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/module.h>
#include <linux/device.h>
@@ -75,6 +74,8 @@
#define INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT)
#define IGB_NBITS_82580 40
+static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
+
/* SYSTIM read access for the 82576 */
static cycle_t igb_ptp_read_82576(const struct cyclecounter *cc)
{
@@ -372,7 +373,7 @@ static int igb_ptp_enable(struct ptp_clock_info *ptp,
* This work function polls the TSYNCTXCTL valid bit to determine when a
* timestamp has been taken for the current stored skb.
**/
-void igb_ptp_tx_work(struct work_struct *work)
+static void igb_ptp_tx_work(struct work_struct *work)
{
struct igb_adapter *adapter = container_of(work, struct igb_adapter,
ptp_tx_work);
@@ -466,7 +467,7 @@ void igb_ptp_rx_hang(struct igb_adapter *adapter)
* available, then it must have been for this skb here because we only
* allow only one such packet into the queue.
**/
-void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
+static void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
{
struct e1000_hw *hw = &adapter->hw;
struct skb_shared_hwtstamps shhwtstamps;
diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c
index 675435f..e2c6d80 100644
--- a/drivers/net/ethernet/intel/igbvf/netdev.c
+++ b/drivers/net/ethernet/intel/igbvf/netdev.c
@@ -1043,11 +1043,11 @@ static void igbvf_set_interrupt_capability(struct igbvf_adapter *adapter)
for (i = 0; i < 3; i++)
adapter->msix_entries[i].entry = i;
- err = pci_enable_msix(adapter->pdev,
- adapter->msix_entries, 3);
+ err = pci_enable_msix_range(adapter->pdev,
+ adapter->msix_entries, 3, 3);
}
- if (err) {
+ if (err < 0) {
/* MSI-X failed */
dev_err(&adapter->pdev->dev,
"Failed to initialize MSI-X interrupts.\n");
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
index 0186ea2..4371ef0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h
@@ -765,6 +765,7 @@ struct ixgbe_adapter {
struct ptp_clock_info ptp_caps;
struct work_struct ptp_tx_work;
struct sk_buff *ptp_tx_skb;
+ struct hwtstamp_config tstamp_config;
unsigned long ptp_tx_start;
unsigned long last_overflow_check;
unsigned long last_rx_ptp_check;
@@ -884,7 +885,6 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
union ixgbe_atr_input *mask);
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
@@ -958,8 +958,8 @@ static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
rx_ring->last_rx_timestamp = jiffies;
}
-int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter, struct ifreq *ifr,
- int cmd);
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
+int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
index a26f3fe..15506f0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -61,6 +61,9 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR);
u16 pcie_devctl2;
+ if (ixgbe_removed(hw->hw_addr))
+ return;
+
/* only take action if timeout value is defaulted to 0 */
if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK)
goto out;
@@ -79,8 +82,9 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw)
* directly in order to set the completion timeout value for
* 16ms to 55ms
*/
- pci_read_config_word(adapter->pdev,
- IXGBE_PCI_DEVICE_CONTROL2, &pcie_devctl2);
+ pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
+ if (ixgbe_removed(hw->hw_addr))
+ return;
pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms;
pci_write_config_word(adapter->pdev,
IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2);
@@ -1316,6 +1320,8 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
.mng_fw_enabled = NULL,
+ .prot_autoc_read = &prot_autoc_read_generic,
+ .prot_autoc_write = &prot_autoc_write_generic,
};
static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
index edda681..b96cefd 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -63,6 +63,8 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 *data);
static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset,
u8 dev_addr, u8 data);
+static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw);
static bool ixgbe_mng_enabled(struct ixgbe_hw *hw)
{
@@ -122,7 +124,6 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
{
s32 ret_val = 0;
u16 list_offset, data_offset, data_value;
- bool got_lock = false;
if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
ixgbe_init_mac_link_ops_82599(hw);
@@ -160,30 +161,10 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
usleep_range(hw->eeprom.semaphore_delay * 1000,
hw->eeprom.semaphore_delay * 2000);
- /* Need SW/FW semaphore around AUTOC writes if LESM on,
- * likewise reset_pipeline requires lock as it also writes
- * AUTOC.
- */
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto setup_sfp_out;
-
- got_lock = true;
- }
-
/* Restart DSP and set SFI mode */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((hw->mac.orig_autoc) |
- IXGBE_AUTOC_LMS_10G_SERIAL));
- hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
- ret_val = ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock) {
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- got_lock = false;
- }
+ ret_val = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc | IXGBE_AUTOC_LMS_10G_SERIAL,
+ false);
if (ret_val) {
hw_dbg(hw, " sfp module setup not complete\n");
@@ -207,6 +188,79 @@ setup_sfp_err:
return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
}
+/**
+ * prot_autoc_read_82599 - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @locked: Return value indicating whether we took the lock for this read.
+ * @reg_val: Value we read from AUTOC
+ *
+ * For this part (82599) we need to wrap read-modify-writes with a possible
+ * FW/SW lock. It is assumed this lock will be freed with the next
+ * prot_autoc_write_82599().  Note that locked can only be true in cases
+ * where this function doesn't return an error.
+ **/
+static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked,
+ u32 *reg_val)
+{
+ s32 ret_val;
+
+ *locked = false;
+ /* If LESM is on then we need to hold the SW/FW semaphore. */
+ if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+		if (ret_val)
+ return IXGBE_ERR_SWFW_SYNC;
+
+ *locked = true;
+ }
+
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return 0;
+}
+
+/**
+ * prot_autoc_write_82599 - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ *	    previous prot_autoc_read_82599.
+ *
+ * This part (82599) may need to hold the SW/FW lock around all writes to
+ * AUTOC. Likewise after a write we need to do a pipeline reset.
+ **/
+static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked)
+{
+ s32 ret_val = 0;
+
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
+ /* We only need to get the lock if:
+ * - We didn't do it already (in the read part of a read-modify-write)
+ * - LESM is enabled.
+ */
+ if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) {
+ ret_val = hw->mac.ops.acquire_swfw_sync(hw,
+ IXGBE_GSSR_MAC_CSR_SM);
+		if (ret_val)
+			return IXGBE_ERR_SWFW_SYNC;
+
+		locked = true;
+	}
+
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
+ ret_val = ixgbe_reset_pipeline_82599(hw);
+
+out:
+ /* Free the SW/FW semaphore as we either grabbed it here or
+ * already had it when this function was called.
+ */
+ if (locked)
+ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+
+ return ret_val;
+}
+
static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw)
{
struct ixgbe_mac_info *mac = &hw->mac;
@@ -542,6 +596,10 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP);
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
/* Disable tx laser; allow 100us to go dark per spec */
esdp_reg |= IXGBE_ESDP_SDP3;
IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg);
@@ -582,6 +640,10 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
**/
static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw)
{
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ return;
+
if (hw->mac.autotry_restart) {
ixgbe_disable_tx_laser_multispeed_fiber(hw);
ixgbe_enable_tx_laser_multispeed_fiber(hw);
@@ -966,7 +1028,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
u32 links_reg;
u32 i;
ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
- bool got_lock = false;
bool autoneg = false;
/* Check to see if speed passed in is supported. */
@@ -989,7 +1050,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
orig_autoc = autoc;
- start_autoc = hw->mac.cached_autoc;
+ start_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
@@ -1030,27 +1091,10 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
}
if (autoc != start_autoc) {
- /* Need SW/FW semaphore around AUTOC writes if LESM is on,
- * likewise reset_pipeline requires us to hold this lock as
- * it also writes to AUTOC.
- */
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- status = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (status != 0)
- goto out;
-
- got_lock = true;
- }
-
/* Restart link */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
- hw->mac.cached_autoc = autoc;
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
+		if (status)
+ goto out;
/* Only poll for autoneg to complete if specified to do so */
if (autoneg_wait_to_complete) {
@@ -1117,7 +1161,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
ixgbe_link_speed link_speed;
s32 status;
- u32 ctrl, i, autoc2;
+ u32 ctrl, i, autoc, autoc2;
u32 curr_lms;
bool link_up = false;
@@ -1151,11 +1195,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
hw->phy.ops.reset(hw);
/* remember AUTOC from before we reset */
- if (hw->mac.cached_autoc)
- curr_lms = hw->mac.cached_autoc & IXGBE_AUTOC_LMS_MASK;
- else
- curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) &
- IXGBE_AUTOC_LMS_MASK;
+ curr_lms = IXGBE_READ_REG(hw, IXGBE_AUTOC) & IXGBE_AUTOC_LMS_MASK;
mac_reset_top:
/*
@@ -1205,7 +1245,7 @@ mac_reset_top:
* stored off yet. Otherwise restore the stored original
* values since the reset operation sets back to defaults.
*/
- hw->mac.cached_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
/* Enable link if disabled in NVM */
@@ -1216,7 +1256,7 @@ mac_reset_top:
}
if (hw->mac.orig_link_settings_stored == false) {
- hw->mac.orig_autoc = hw->mac.cached_autoc;
+ hw->mac.orig_autoc = autoc;
hw->mac.orig_autoc2 = autoc2;
hw->mac.orig_link_settings_stored = true;
} else {
@@ -1233,28 +1273,12 @@ mac_reset_top:
(hw->mac.orig_autoc & ~IXGBE_AUTOC_LMS_MASK) |
curr_lms;
- if (hw->mac.cached_autoc != hw->mac.orig_autoc) {
- /* Need SW/FW semaphore around AUTOC writes if LESM is
- * on, likewise reset_pipeline requires us to hold
- * this lock as it also writes to AUTOC.
- */
- bool got_lock = false;
- if (ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- status = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (status)
- goto reset_hw_out;
-
- got_lock = true;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, hw->mac.orig_autoc);
- hw->mac.cached_autoc = hw->mac.orig_autoc;
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ if (autoc != hw->mac.orig_autoc) {
+ status = hw->mac.ops.prot_autoc_write(hw,
+ hw->mac.orig_autoc,
+ false);
+			if (status)
+ goto reset_hw_out;
}
if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
@@ -2260,7 +2284,7 @@ fw_version_err:
* Returns true if the LESM FW module is present and enabled. Otherwise
* returns false. Smart Speed must be disabled if LESM FW module is enabled.
**/
-bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
+static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
{
bool lesm_enabled = false;
u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
@@ -2366,7 +2390,7 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
* full pipeline reset. Note - We must hold the SW/FW semaphore before writing
* to AUTOC, so this function assumes the semaphore is held.
**/
-s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
+static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
{
s32 ret_val;
u32 anlp1_reg = 0;
@@ -2380,11 +2404,12 @@ s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw)
IXGBE_WRITE_FLUSH(hw);
}
- autoc_reg = hw->mac.cached_autoc;
+ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
/* Write AUTOC register with toggled LMS[2] bit and Restart_AN */
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg ^ IXGBE_AUTOC_LMS_1G_AN);
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
+ autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT));
/* Wait for AN to leave state 0 */
for (i = 0; i < 10; i++) {
@@ -2566,6 +2591,8 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
.get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic,
.init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic,
.mng_fw_enabled = &ixgbe_mng_enabled,
+ .prot_autoc_read = &prot_autoc_read_82599,
+ .prot_autoc_write = &prot_autoc_write_82599,
};
static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
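The new prot_autoc_read/prot_autoc_write mac operations above hide the 82599 LESM locking from callers: the read reports through *locked whether it took the SW/FW semaphore, and the 82599 write drops it again after the pipeline reset, while the generic versions skip the locking entirely. A sketch of the intended read-modify-write pattern, using a hypothetical example_restart_an() helper that is not part of this patch:

/* Sketch: protected read-modify-write of AUTOC through the mac ops. */
static s32 example_restart_an(struct ixgbe_hw *hw)
{
	bool locked = false;
	u32 autoc;
	s32 ret_val;

	/* may take the SW/FW semaphore on 82599 when LESM is enabled */
	ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc);
	if (ret_val)
		return ret_val;

	autoc |= IXGBE_AUTOC_AN_RESTART;

	/* on 82599 this also resets the pipeline and releases the lock */
	return hw->mac.ops.prot_autoc_write(hw, autoc, locked);
}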
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index b5c434b..4456c23 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -114,7 +114,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
s32 ret_val = 0;
u32 reg = 0, reg_bp = 0;
u16 reg_cu = 0;
- bool got_lock = false;
+ bool locked = false;
/*
* Validate the requested mode. Strict IEEE mode does not allow
@@ -139,11 +139,17 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
* we link at 10G, the 1G advertisement is harmless and vice versa.
*/
switch (hw->phy.media_type) {
+ case ixgbe_media_type_backplane:
+		/* some MACs need RMW protection on AUTOC */
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
+		if (ret_val)
+ goto out;
+
+		/* only backplane uses autoc so fall through */
case ixgbe_media_type_fiber_fixed:
case ixgbe_media_type_fiber:
- case ixgbe_media_type_backplane:
reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
- reg_bp = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+
break;
case ixgbe_media_type_copper:
hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE,
@@ -240,27 +246,12 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
		 * LESM is on, likewise reset_pipeline requires the lock as
* it also writes AUTOC.
*/
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto out;
-
- got_lock = true;
- }
-
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_bp);
-
- if (hw->mac.type == ixgbe_mac_82599EB)
- ixgbe_reset_pipeline_82599(hw);
-
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
+ ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
+ if (ret_val)
+ goto out;
} else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
- ixgbe_device_supports_autoneg_fc(hw)) {
+ ixgbe_device_supports_autoneg_fc(hw)) {
hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE,
MDIO_MMD_AN, reg_cu);
}
@@ -2437,6 +2428,55 @@ out:
}
/**
+ * ixgbe_pcie_timeout_poll - Return number of times to poll for completion
+ * @hw: pointer to hardware structure
+ *
+ * System-wide timeout range is encoded in PCIe Device Control2 register.
+ *
+ * Add 10% to specified maximum and return the number of times to poll for
+ * completion timeout, in units of 100 microsec. Never return less than
+ * 800 = 80 millisec.
+ **/
+static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+	u16 devctl2;
+ u32 pollcnt;
+
+ pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_CONTROL2,
+ &devctl2);
+ devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK;
+
+ switch (devctl2) {
+ case IXGBE_PCIDEVCTRL2_65_130ms:
+ pollcnt = 1300; /* 130 millisec */
+ break;
+ case IXGBE_PCIDEVCTRL2_260_520ms:
+ pollcnt = 5200; /* 520 millisec */
+ break;
+ case IXGBE_PCIDEVCTRL2_1_2s:
+ pollcnt = 20000; /* 2 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_4_8s:
+ pollcnt = 80000; /* 8 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_17_34s:
+ pollcnt = 34000; /* 34 sec */
+ break;
+ case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */
+ case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */
+ case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */
+ case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */
+ default:
+ pollcnt = 800; /* 80 millisec minimum */
+ break;
+ }
+
+ /* add 10% to spec maximum */
+ return (pollcnt * 11) / 10;
+}
+
+/**
* ixgbe_disable_pcie_master - Disable PCI-express master access
* @hw: pointer to hardware structure
*
@@ -2447,16 +2487,16 @@ out:
**/
static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
s32 status = 0;
- u32 i;
+ u32 i, poll;
u16 value;
/* Always set this bit to ensure any future transactions are blocked */
IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS);
/* Exit if master requests are blocked */
- if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
+ if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
+ ixgbe_removed(hw->hw_addr))
goto out;
/* Poll for master request bit to clear */
@@ -2481,10 +2521,12 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
* Before proceeding, make sure that the PCIe block does not have
* transactions pending.
*/
- for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
+ poll = ixgbe_pcie_timeout_poll(hw);
+ for (i = 0; i < poll; i++) {
udelay(100);
- pci_read_config_word(adapter->pdev, IXGBE_PCI_DEVICE_STATUS,
- &value);
+ value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
+ if (ixgbe_removed(hw->hw_addr))
+ goto out;
if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
goto out;
}
@@ -2564,6 +2606,35 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask)
}
/**
+ * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read
+ * @hw: pointer to hardware structure
+ * @reg_val: Value we read from AUTOC
+ * @locked: bool to indicate whether the SW/FW lock should be taken. Never
+ * true in this generic case.
+ *
+ * The default case requires no protection so just do the register read.
+ **/
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val)
+{
+ *locked = false;
+ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ return 0;
+}
+
+/**
+ * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write
+ * @hw: pointer to hardware structure
+ * @reg_val: value to write to AUTOC
+ * @locked: bool to indicate whether the SW/FW lock was already taken by
+ * previous read.
+ **/
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked)
+{
+ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val);
+ return 0;
+}
+
+/**
* ixgbe_disable_rx_buff_generic - Stops the receive data path
* @hw: pointer to hardware structure
*
@@ -2641,6 +2712,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
s32 ret_val = 0;
+ bool locked = false;
/*
* Link must be up to auto-blink the LEDs;
@@ -2649,28 +2721,19 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
hw->mac.ops.check_link(hw, &speed, &link_up, false);
if (!link_up) {
- /* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on.
- */
- bool got_lock = false;
-
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto out;
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+		if (ret_val)
+ goto out;
- got_lock = true;
- }
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
autoc_reg |= IXGBE_AUTOC_FLU;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
+
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+		if (ret_val)
+ goto out;
+
IXGBE_WRITE_FLUSH(hw);
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
usleep_range(10000, 20000);
}
@@ -2690,33 +2753,21 @@ out:
**/
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
{
- u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
+ u32 autoc_reg = 0;
u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
s32 ret_val = 0;
- bool got_lock = false;
+ bool locked = false;
- /* Need the SW/FW semaphore around AUTOC writes if 82599 and
- * LESM is on.
- */
- if ((hw->mac.type == ixgbe_mac_82599EB) &&
- ixgbe_verify_lesm_fw_enabled_82599(hw)) {
- ret_val = hw->mac.ops.acquire_swfw_sync(hw,
- IXGBE_GSSR_MAC_CSR_SM);
- if (ret_val)
- goto out;
-
- got_lock = true;
- }
+ ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
+	if (ret_val)
+ goto out;
autoc_reg &= ~IXGBE_AUTOC_FLU;
autoc_reg |= IXGBE_AUTOC_AN_RESTART;
- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg);
-
- if (hw->mac.type == ixgbe_mac_82599EB)
- ixgbe_reset_pipeline_82599(hw);
- if (got_lock)
- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
+ ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
+	if (ret_val)
+ goto out;
led_reg &= ~IXGBE_LED_MODE_MASK(index);
led_reg &= ~IXGBE_LED_BLINK(index);
@@ -2817,7 +2868,6 @@ san_mac_addr_clr:
**/
u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
{
- struct ixgbe_adapter *adapter = hw->back;
u16 msix_count = 1;
u16 max_msix_count;
u16 pcie_offset;
@@ -2836,7 +2886,9 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
return msix_count;
}
- pci_read_config_word(adapter->pdev, pcie_offset, &msix_count);
+ msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
+ if (ixgbe_removed(hw->hw_addr))
+ msix_count = 0;
msix_count &= IXGBE_PCIE_MSIX_TBL_SZ_MASK;
/* MSI-X count is zero-based in HW */
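ixgbe_pcie_timeout_poll() above translates the completion-timeout encoding in PCIe Device Control2 into a poll count in 100-microsecond units (never below 800, i.e. 80 ms), and ixgbe_disable_pcie_master() now uses it to bound its wait for pending transactions. A stripped-down sketch of that loop, shown as a hypothetical example_wait_for_pending() helper as it might sit next to these functions in ixgbe_common.c:

static void example_wait_for_pending(struct ixgbe_hw *hw)
{
	u32 i, poll = ixgbe_pcie_timeout_poll(hw);
	u16 status;

	for (i = 0; i < poll; i++) {
		udelay(100);
		status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
		if (ixgbe_removed(hw->hw_addr))
			return;	/* adapter was surprise-removed */
		if (!(status & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
			return;	/* no outstanding requests left */
	}
	/* timed out; the real driver logs this and applies a workaround */
}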
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
index f2e3919..ef0fd4c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -98,6 +98,10 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
bool *link_up, bool link_up_wait_to_complete);
s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
u16 *wwpn_prefix);
+
+s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val);
+s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked);
+
s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
@@ -109,7 +113,6 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw);
void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb,
u32 headroom, int strategy);
-s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw);
#define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8
#define IXGBE_EMC_INTERNAL_DATA 0x00
@@ -125,6 +128,10 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw);
s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw);
#define IXGBE_FAILED_READ_REG 0xffffffffU
+#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU
+#define IXGBE_FAILED_READ_CFG_WORD 0xffffU
+
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg);
static inline bool ixgbe_removed(void __iomem *addr)
{
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 0433070..f2d35c0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -1247,6 +1247,11 @@ static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
struct ixgbe_hw *hw = &adapter->hw;
bool link_up;
u32 link_speed = 0;
+
+ if (ixgbe_removed(hw->hw_addr)) {
+ *data = 1;
+ return 1;
+ }
*data = 0;
hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
@@ -1969,6 +1974,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
data[1] = 1;
data[2] = 1;
data[3] = 1;
+ data[4] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
return;
}
@@ -1988,6 +1994,7 @@ static void ixgbe_diag_test(struct net_device *netdev,
data[1] = 1;
data[2] = 1;
data[3] = 1;
+ data[4] = 1;
eth_test->flags |= ETH_TEST_FL_FAILED;
clear_bit(__IXGBE_TESTING,
&adapter->state);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
index 32e3eaa..0834e1e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c
@@ -698,7 +698,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
int vectors)
{
- int err, vector_threshold;
+ int vector_threshold;
/* We'll want at least 2 (vector_threshold):
* 1) TxQ[0] + RxQ[0] handler
@@ -712,18 +712,10 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err) /* Success in acquiring all requested vectors. */
- break;
- else if (err < 0)
- vectors = 0; /* Nasty failure, quit now */
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
+ vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
- if (vectors < vector_threshold) {
+ if (vectors < 0) {
/* Can't allocate enough MSI-X interrupts? Oh well.
* This just means we'll go with either a single MSI
* vector or fall back to legacy interrupts.
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 18076c4..10b35d8 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -67,7 +67,7 @@ static char ixgbe_default_device_descr[] =
#define DRV_VERSION "3.19.1-k"
const char ixgbe_driver_version[] = DRV_VERSION;
static const char ixgbe_copyright[] =
- "Copyright (c) 1999-2013 Intel Corporation.";
+ "Copyright (c) 1999-2014 Intel Corporation.";
static const struct ixgbe_info *ixgbe_info_tbl[] = {
[board_82598] = &ixgbe_82598_info,
@@ -151,6 +151,8 @@ MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
+static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev);
+
static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
u32 reg, u16 *value)
{
@@ -169,6 +171,9 @@ static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter,
return -1;
pcie_capability_read_word(parent_dev, reg, value);
+ if (*value == IXGBE_FAILED_READ_CFG_WORD &&
+ ixgbe_check_cfg_remove(&adapter->hw, parent_dev))
+ return -1;
return 0;
}
@@ -313,6 +318,48 @@ void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg)
ixgbe_remove_adapter(hw);
}
+static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev)
+{
+ u16 value;
+
+ pci_read_config_word(pdev, PCI_VENDOR_ID, &value);
+ if (value == IXGBE_FAILED_READ_CFG_WORD) {
+ ixgbe_remove_adapter(hw);
+ return true;
+ }
+ return false;
+}
+
+u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u16 value;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return IXGBE_FAILED_READ_CFG_WORD;
+ pci_read_config_word(adapter->pdev, reg, &value);
+ if (value == IXGBE_FAILED_READ_CFG_WORD &&
+ ixgbe_check_cfg_remove(hw, adapter->pdev))
+ return IXGBE_FAILED_READ_CFG_WORD;
+ return value;
+}
+
+#ifdef CONFIG_PCI_IOV
+static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg)
+{
+ struct ixgbe_adapter *adapter = hw->back;
+ u32 value;
+
+ if (ixgbe_removed(hw->hw_addr))
+ return IXGBE_FAILED_READ_CFG_DWORD;
+ pci_read_config_dword(adapter->pdev, reg, &value);
+ if (value == IXGBE_FAILED_READ_CFG_DWORD &&
+ ixgbe_check_cfg_remove(hw, adapter->pdev))
+ return IXGBE_FAILED_READ_CFG_DWORD;
+ return value;
+}
+#endif /* CONFIG_PCI_IOV */
+
static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter)
{
BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state));
@@ -2630,9 +2677,12 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data)
switch (hw->mac.type) {
case ixgbe_mac_82599EB:
case ixgbe_mac_X540:
- if (eicr & IXGBE_EICR_ECC)
- e_info(link, "Received unrecoverable ECC Err, please "
- "reboot\n");
+ if (eicr & IXGBE_EICR_ECC) {
+ e_info(link, "Received ECC Err, initiating reset\n");
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+ }
/* Handle Flow Director Full threshold interrupt */
if (eicr & IXGBE_EICR_FLOW_DIR) {
int reinit_count = 0;
@@ -2846,9 +2896,12 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
ixgbe_check_sfp_event(adapter, eicr);
/* Fall through */
case ixgbe_mac_X540:
- if (eicr & IXGBE_EICR_ECC)
- e_info(link, "Received unrecoverable ECC err, please "
- "reboot\n");
+ if (eicr & IXGBE_EICR_ECC) {
+ e_info(link, "Received ECC Err, initiating reset\n");
+ adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED;
+ ixgbe_service_event_schedule(adapter);
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC);
+ }
ixgbe_check_overtemp_event(adapter, eicr);
break;
default:
@@ -5502,6 +5555,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
struct net_device *netdev = adapter->netdev;
u32 err;
+ adapter->hw.hw_addr = adapter->io_addr;
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
/*
@@ -7143,7 +7197,9 @@ static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd)
switch (cmd) {
case SIOCSHWTSTAMP:
- return ixgbe_ptp_hwtstamp_ioctl(adapter, req, cmd);
+ return ixgbe_ptp_set_ts_config(adapter, req);
+ case SIOCGHWTSTAMP:
+ return ixgbe_ptp_get_ts_config(adapter, req);
default:
return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd);
}
@@ -7792,6 +7848,7 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
case IXGBE_DEV_ID_82599_SFP:
/* Only these subdevices could supports WOL */
switch (subdevice_id) {
+ case IXGBE_SUBDEV_ID_82599_SFP_WOL0:
case IXGBE_SUBDEV_ID_82599_560FLR:
/* only support first port */
if (hw->bus.func != 0)
@@ -8331,6 +8388,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
struct net_device *netdev = adapter->netdev;
#ifdef CONFIG_PCI_IOV
+ struct ixgbe_hw *hw = &adapter->hw;
struct pci_dev *bdev, *vfdev;
u32 dw0, dw1, dw2, dw3;
int vf, pos;
@@ -8351,10 +8409,12 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
if (!pos)
goto skip_bad_vf_detection;
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG, &dw0);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 4, &dw1);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 8, &dw2);
- pci_read_config_dword(bdev, pos + PCI_ERR_HEADER_LOG + 12, &dw3);
+ dw0 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG);
+ dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4);
+ dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8);
+ dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12);
+ if (ixgbe_removed(hw->hw_addr))
+ goto skip_bad_vf_detection;
req_id = dw1 >> 16;
/* On the 82599 if bit 7 of the requestor ID is set then it's a VF */
@@ -8446,6 +8506,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
e_err(probe, "Cannot re-enable PCI device after reset.\n");
result = PCI_ERS_RESULT_DISCONNECT;
} else {
+ adapter->hw.hw_addr = adapter->io_addr;
pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);
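ixgbe_read_pci_cfg_word() and ixgbe_check_cfg_remove() above make PCI config reads safe against surprise removal: a read of all ones triggers a vendor-ID check, and a truly absent device marks the adapter as removed. Callers are expected to follow the pattern sketched below (example_read_devctl2() is a hypothetical name, not part of the patch):

static u16 example_read_devctl2(struct ixgbe_hw *hw)
{
	u16 devctl2;

	devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2);
	if (ixgbe_removed(hw->hw_addr))
		return 0;	/* device is gone; the value is meaningless */

	return devctl2;
}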
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index 132557c..d2caae4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -98,6 +98,32 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
}
/**
+ * ixgbe_check_reset_blocked - check status of MNG FW veto bit
+ * @hw: pointer to the hardware structure
+ *
+ * This function checks the MMNGC.MNG_VETO bit to see if there are
+ * any constraints on link from manageability.  For MACs that don't
+ * have this bit just return false since the link cannot be blocked
+ * via this method.
+ **/
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw)
+{
+ u32 mmngc;
+
+ /* If we don't have this bit, it can't be blocking */
+ if (hw->mac.type == ixgbe_mac_82598EB)
+ return false;
+
+ mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC);
+ if (mmngc & IXGBE_MMNGC_MNG_VETO) {
+ hw_dbg(hw, "MNG_VETO bit detected.\n");
+ return true;
+ }
+
+ return false;
+}
+
+/**
* ixgbe_get_phy_id - Get the phy type
* @hw: pointer to hardware structure
*
@@ -172,6 +198,10 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
(IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
goto out;
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
/*
* Perform soft PHY reset to the PHY_XS.
* This will cause a soft reset to the PHY
@@ -476,6 +506,10 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
autoneg_reg);
}
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
/* Restart PHY autonegotiation and wait for completion */
hw->phy.ops.read_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, &autoneg_reg);
@@ -682,6 +716,10 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
autoneg_reg);
}
+ /* Blocked by MNG FW so don't reset PHY */
+ if (ixgbe_check_reset_blocked(hw))
+ return status;
+
/* Restart PHY autonegotiation and wait for completion */
hw->phy.ops.read_reg(hw, MDIO_CTRL1,
MDIO_MMD_AN, &autoneg_reg);
@@ -759,6 +797,10 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
s32 ret_val = 0;
u32 i;
+ /* Blocked by MNG FW so bail */
+ if (ixgbe_check_reset_blocked(hw))
+ goto out;
+
hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
/* reset the PHY and poll for completion */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
index fffcbdd..b4d4323 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -131,6 +131,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
ixgbe_link_speed *speed,
bool *autoneg);
+s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw);
/* PHY specific */
s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
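ixgbe_check_reset_blocked() above gives every PHY path a single place to honor the manageability-firmware veto bit, and the ixgbe_phy.c hunks all follow the same shape. Sketched here with a hypothetical example_phy_reset() caller:

static s32 example_phy_reset(struct ixgbe_hw *hw)
{
	/* MNG FW owns the link; do not touch the PHY */
	if (ixgbe_check_reset_blocked(hw))
		return 0;

	return hw->phy.ops.reset(hw);
}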
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
index 5184e2a..9ef730f 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
@@ -576,14 +576,21 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
shhwtstamps->hwtstamp = ns_to_ktime(ns);
}
+int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
+{
+ struct hwtstamp_config *config = &adapter->tstamp_config;
+
+ return copy_to_user(ifr->ifr_data, config,
+ sizeof(*config)) ? -EFAULT : 0;
+}
+
/**
- * ixgbe_ptp_hwtstamp_ioctl - control hardware time stamping
+ * ixgbe_ptp_set_ts_config - control hardware time stamping
* @adapter: pointer to adapter struct
* @ifreq: ioctl data
- * @cmd: particular ioctl requested
*
* Outgoing time stamping can be enabled and disabled. Play nice and
- * disable it when requested, although it shouldn't case any overhead
+ * disable it when requested, although it shouldn't cause any overhead
* when no packet needs it. At most one packet in the queue may be
* marked for time stamping, otherwise it would be impossible to tell
* for sure to which packet the hardware time stamp belongs.
@@ -599,8 +606,7 @@ void __ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
* Event mode. This more accurately tells the user what the hardware is going
* to do anyways.
*/
-int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
- struct ifreq *ifr, int cmd)
+int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr)
{
struct ixgbe_hw *hw = &adapter->hw;
struct hwtstamp_config config;
@@ -702,6 +708,10 @@ int ixgbe_ptp_hwtstamp_ioctl(struct ixgbe_adapter *adapter,
regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH);
regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH);
+ /* save these settings for future reference */
+ memcpy(&adapter->tstamp_config, &config,
+ sizeof(adapter->tstamp_config));
+
return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
-EFAULT : 0;
}
@@ -809,6 +819,9 @@ void ixgbe_ptp_reset(struct ixgbe_adapter *adapter)
IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000);
IXGBE_WRITE_FLUSH(hw);
+ /* Reset the saved tstamp_config */
+ memset(&adapter->tstamp_config, 0, sizeof(adapter->tstamp_config));
+
ixgbe_ptp_start_cyclecounter(adapter);
spin_lock_irqsave(&adapter->tmreg_lock, flags);
@@ -840,7 +853,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
switch (adapter->hw.mac.type) {
case ixgbe_mac_X540:
- snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+ snprintf(adapter->ptp_caps.name,
+ sizeof(adapter->ptp_caps.name),
+ "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
@@ -854,7 +869,9 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter)
adapter->ptp_caps.enable = ixgbe_ptp_enable;
break;
case ixgbe_mac_82599EB:
- snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name);
+ snprintf(adapter->ptp_caps.name,
+ sizeof(adapter->ptp_caps.name),
+ "%s", netdev->name);
adapter->ptp_caps.owner = THIS_MODULE;
adapter->ptp_caps.max_adj = 250000000;
adapter->ptp_caps.n_alarm = 0;
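With ixgbe_ptp_get_ts_config() returning the configuration saved in adapter->tstamp_config, user space can read back the current hardware timestamping settings through the standard SIOCGHWTSTAMP ioctl instead of re-issuing SIOCSHWTSTAMP. A small user-space sketch (interface name and error handling kept minimal; "eth0" is an assumption):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&cfg, 0, sizeof(cfg));
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&cfg;

	if (ioctl(fd, SIOCGHWTSTAMP, &ifr) == 0)
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}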
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
index 0d39cfc..c10382e 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
@@ -1,7 +1,7 @@
/*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
- Copyright(c) 1999 - 2013 Intel Corporation.
+ Copyright(c) 1999 - 2014 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
@@ -54,6 +54,7 @@
#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
+#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071
#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72
#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0
#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B
@@ -1609,6 +1610,9 @@ enum {
#define IXGBE_MACC_FS 0x00040000
#define IXGBE_MAC_RX2TX_LPBK 0x00000002
+/* Veto Bit definition */
+#define IXGBE_MMNGC_MNG_VETO 0x00000001
+
/* LINKS Bit Masks */
#define IXGBE_LINKS_KX_AN_COMP 0x80000000
#define IXGBE_LINKS_UP 0x40000000
@@ -1853,8 +1857,19 @@ enum {
#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80
#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005
+#define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf
+#define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0
+#define IXGBE_PCIDEVCTRL2_50_100us 0x1
+#define IXGBE_PCIDEVCTRL2_1_2ms 0x2
+#define IXGBE_PCIDEVCTRL2_16_32ms 0x5
+#define IXGBE_PCIDEVCTRL2_65_130ms 0x6
+#define IXGBE_PCIDEVCTRL2_260_520ms 0x9
+#define IXGBE_PCIDEVCTRL2_1_2s 0xa
+#define IXGBE_PCIDEVCTRL2_4_8s 0xd
+#define IXGBE_PCIDEVCTRL2_17_34s 0xe
+
/* Number of 100 microseconds we wait for PCI Express master disable */
-#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
+#define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800
/* RAH */
#define IXGBE_RAH_VIND_MASK 0x003C0000
@@ -2858,6 +2873,8 @@ struct ixgbe_mac_operations {
s32 (*enable_rx_dma)(struct ixgbe_hw *, u32);
s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16);
void (*release_swfw_sync)(struct ixgbe_hw *, u16);
+ s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *);
+ s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool);
/* Link */
void (*disable_tx_laser)(struct ixgbe_hw *);
@@ -2957,7 +2974,6 @@ struct ixgbe_mac_info {
u32 max_tx_queues;
u32 max_rx_queues;
u32 orig_autoc;
- u32 cached_autoc;
u32 orig_autoc2;
bool orig_link_settings_stored;
bool autotry_restart;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
index 24b80a6..c870f37 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
@@ -855,6 +855,8 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
.get_thermal_sensor_data = NULL,
.init_thermal_sensor_thresh = NULL,
.mng_fw_enabled = NULL,
+ .prot_autoc_read = &prot_autoc_read_generic,
+ .prot_autoc_write = &prot_autoc_write_generic,
};
static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 9df2898..57e0cd8 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -1817,7 +1817,6 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
int vectors)
{
- int err = 0;
int vector_threshold;
/* We'll want at least 2 (vector_threshold):
@@ -1831,33 +1830,24 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
* Right now, we simply care about how many we'll get; we'll
* set them up later while requesting irq's.
*/
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->msix_entries,
- vectors);
- if (!err || err < 0) /* Success or a nasty failure. */
- break;
- else /* err == number of vectors we should try again with */
- vectors = err;
- }
-
- if (vectors < vector_threshold)
- err = -ENOMEM;
+ vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ vector_threshold, vectors);
- if (err) {
+ if (vectors < 0) {
dev_err(&adapter->pdev->dev,
"Unable to allocate MSI-X interrupts\n");
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
- } else {
- /*
- * Adjust for only the vectors we'll use, which is minimum
- * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
- * vectors we were allocated.
- */
- adapter->num_msix_vectors = vectors;
+ return vectors;
}
- return err;
+ /* Adjust for only the vectors we'll use, which is minimum
+ * of max_msix_q_vectors + NON_Q_VECTORS, or the number of
+ * vectors we were allocated.
+ */
+ adapter->num_msix_vectors = vectors;
+
+ return 0;
}
/**
@@ -2787,6 +2777,9 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
u32 vlan_macip_lens, type_tucmd;
u32 mss_l4len_idx, l4len;
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ return 0;
+
if (!skb_is_gso(skb))
return 0;
@@ -3165,7 +3158,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
tso = ixgbevf_tso(tx_ring, first, &hdr_len);
if (tso < 0)
goto out_drop;
- else
+ else if (!tso)
ixgbevf_tx_csum(tx_ring, first);
ixgbevf_tx_map(tx_ring, first, hdr_len);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index f418f4f..12c6a66 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2784,7 +2784,6 @@ static int mvneta_probe(struct platform_device *pdev)
const char *mac_from;
int phy_mode;
int err;
- int cpu;
/* Our multiqueue support is not complete, so for now, only
* allow the usage of the first RX queue
@@ -2845,18 +2844,12 @@ static int mvneta_probe(struct platform_device *pdev)
}
/* Alloc per-cpu stats */
- pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
+ pp->stats = netdev_alloc_pcpu_stats(struct mvneta_pcpu_stats);
if (!pp->stats) {
err = -ENOMEM;
goto err_unmap;
}
- for_each_possible_cpu(cpu) {
- struct mvneta_pcpu_stats *stats;
- stats = per_cpu_ptr(pp->stats, cpu);
- u64_stats_init(&stats->syncp);
- }
-
dt_mac_addr = of_get_mac_address(dn);
if (dt_mac_addr) {
mac_from = "device tree";
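The mvneta hunk above swaps the open-coded alloc_percpu() plus for_each_possible_cpu()/u64_stats_init() loop for netdev_alloc_pcpu_stats(), which does both in one step. A sketch with a hypothetical stats structure (the macro only requires a struct u64_stats_sync member named syncp):

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

struct example_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;
};

static struct example_pcpu_stats __percpu *example_alloc_stats(void)
{
	/* allocates per-CPU data and runs u64_stats_init() on each syncp */
	return netdev_alloc_pcpu_stats(struct example_pcpu_stats);
}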
diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig
index 563495d..1a6e188 100644
--- a/drivers/net/ethernet/mellanox/mlx4/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig
@@ -3,7 +3,7 @@
#
config MLX4_EN
- tristate "Mellanox Technologies 10Gbit Ethernet support"
+ tristate "Mellanox Technologies 1/10/40Gbit Ethernet support"
depends on PCI
select MLX4_CORE
select PTP_1588_CLOCK
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
index b4881b6..c95ca25 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_dcb_nl.c
@@ -62,7 +62,7 @@ static int mlx4_en_ets_validate(struct mlx4_en_priv *priv, struct ieee_ets *ets)
int has_ets_tc = 0;
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
- if (ets->prio_tc[i] > MLX4_EN_NUM_UP) {
+ if (ets->prio_tc[i] >= MLX4_EN_NUM_UP) {
en_err(priv, "Bad priority in UP <=> TC mapping. TC: %d, UP: %d\n",
i, ets->prio_tc[i]);
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index d357bf5..3454437 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -72,6 +72,12 @@ MLX4_EN_PARM_INT(pfctx, 0, "Priority based Flow Control policy on TX[7:0]."
MLX4_EN_PARM_INT(pfcrx, 0, "Priority based Flow Control policy on RX[7:0]."
" Per priority bit mask");
+MLX4_EN_PARM_INT(inline_thold, MAX_INLINE,
+ "Threshold for using inline data (range: 17-104, default: 104)");
+
+#define MAX_PFC_TX 0xff
+#define MAX_PFC_RX 0xff
+
int en_print(const char *level, const struct mlx4_en_priv *priv,
const char *format, ...)
{
@@ -140,6 +146,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
params->prof[i].tx_ring_num = params->num_tx_rings_p_up *
MLX4_EN_NUM_UP;
params->prof[i].rss_rings = 0;
+ params->prof[i].inline_thold = inline_thold;
}
return 0;
@@ -274,19 +281,8 @@ static void *mlx4_en_add(struct mlx4_dev *dev)
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_init_timestamp(mdev);
- mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
- if (!dev->caps.comp_pool) {
- mdev->profile.prof[i].rx_ring_num =
- rounddown_pow_of_two(max_t(int, MIN_RX_RINGS,
- min_t(int,
- dev->caps.num_comp_vectors,
- DEF_RX_RINGS)));
- } else {
- mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(
- min_t(int, dev->caps.comp_pool/
- dev->caps.num_ports - 1 , MAX_MSIX_P_PORT - 1));
- }
- }
+ /* Set default number of RX rings*/
+ mlx4_en_set_num_rx_rings(mdev);
/* Create our own workqueue for reset/multicast tasks
* Note: we cannot use the shared workqueue because of deadlocks caused
@@ -336,8 +332,31 @@ static struct mlx4_interface mlx4_en_interface = {
.protocol = MLX4_PROT_ETH,
};
+void mlx4_en_verify_params(void)
+{
+ if (pfctx > MAX_PFC_TX) {
+ pr_warn("mlx4_en: WARNING: illegal module parameter pfctx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
+ pfctx, MAX_PFC_TX);
+ pfctx = 0;
+ }
+
+ if (pfcrx > MAX_PFC_RX) {
+ pr_warn("mlx4_en: WARNING: illegal module parameter pfcrx 0x%x - should be in range 0-0x%x, will be changed to default (0)\n",
+ pfcrx, MAX_PFC_RX);
+ pfcrx = 0;
+ }
+
+ if (inline_thold < MIN_PKT_LEN || inline_thold > MAX_INLINE) {
+ pr_warn("mlx4_en: WARNING: illegal module parameter inline_thold %d - should be in range %d-%d, will be changed to default (%d)\n",
+ inline_thold, MIN_PKT_LEN, MAX_INLINE, MAX_INLINE);
+ inline_thold = MAX_INLINE;
+ }
+}
+
static int __init mlx4_en_init(void)
{
+ mlx4_en_verify_params();
+
return mlx4_register_interface(&mlx4_en_interface);
}
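mlx4_en_verify_params() above clamps out-of-range module parameters back to their defaults at init time instead of failing the module load. The same pattern in isolation, with hypothetical parameter names and bounds:

#include <linux/module.h>

static int example_thold = 104;
module_param_named(example_thold, example_thold, int, 0444);
MODULE_PARM_DESC(example_thold, "Example inline threshold (17-104)");

static void example_verify_params(void)
{
	if (example_thold < 17 || example_thold > 104) {
		pr_warn("example: illegal example_thold %d, using 104\n",
			example_thold);
		example_thold = 104;
	}
}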
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fad4531..3db5946 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -603,7 +603,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
int err = 0;
u64 reg_id;
int *qpn = &priv->base_qpn;
- u64 mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+ u64 mac = mlx4_mac_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for adding\n",
priv->dev->dev_addr);
@@ -672,7 +672,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
u64 mac;
if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) {
- mac = mlx4_en_mac_to_u64(priv->dev->dev_addr);
+ mac = mlx4_mac_to_u64(priv->dev->dev_addr);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
priv->dev->dev_addr);
mlx4_unregister_mac(dev, priv->port, mac);
@@ -685,7 +685,7 @@ static void mlx4_en_put_qp(struct mlx4_en_priv *priv)
for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) {
bucket = &priv->mac_hash[i];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
- mac = mlx4_en_mac_to_u64(entry->mac);
+ mac = mlx4_mac_to_u64(entry->mac);
en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n",
entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
@@ -715,14 +715,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
struct mlx4_en_dev *mdev = priv->mdev;
struct mlx4_dev *dev = mdev->dev;
int err = 0;
- u64 new_mac_u64 = mlx4_en_mac_to_u64(new_mac);
+ u64 new_mac_u64 = mlx4_mac_to_u64(new_mac);
if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) {
struct hlist_head *bucket;
unsigned int mac_hash;
struct mlx4_mac_entry *entry;
struct hlist_node *tmp;
- u64 prev_mac_u64 = mlx4_en_mac_to_u64(prev_mac);
+ u64 prev_mac_u64 = mlx4_mac_to_u64(prev_mac);
bucket = &priv->mac_hash[prev_mac[MLX4_EN_MAC_HASH_IDX]];
hlist_for_each_entry_safe(entry, tmp, bucket, hlist) {
@@ -751,18 +751,6 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
}
-u64 mlx4_en_mac_to_u64(u8 *addr)
-{
- u64 mac = 0;
- int i;
-
- for (i = 0; i < ETH_ALEN; i++) {
- mac <<= 8;
- mac |= addr[i];
- }
- return mac;
-}
-
static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
{
int err = 0;
@@ -1081,7 +1069,7 @@ static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
mlx4_en_cache_mclist(dev);
netif_addr_unlock_bh(dev);
list_for_each_entry(mclist, &priv->mc_list, list) {
- mcast_addr = mlx4_en_mac_to_u64(mclist->addr);
+ mcast_addr = mlx4_mac_to_u64(mclist->addr);
mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
mcast_addr, 0, MLX4_MCAST_CONFIG);
}
@@ -1173,7 +1161,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
found = true;
if (!found) {
- mac = mlx4_en_mac_to_u64(entry->mac);
+ mac = mlx4_mac_to_u64(entry->mac);
mlx4_en_uc_steer_release(priv, entry->mac,
priv->base_qpn,
entry->reg_id);
@@ -1216,7 +1204,7 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
priv->flags |= MLX4_EN_FLAG_FORCE_PROMISC;
break;
}
- mac = mlx4_en_mac_to_u64(ha->addr);
+ mac = mlx4_mac_to_u64(ha->addr);
memcpy(entry->mac, ha->addr, ETH_ALEN);
err = mlx4_register_mac(mdev->dev, priv->port, mac);
if (err < 0) {
@@ -2206,7 +2194,7 @@ static int mlx4_en_set_vf_mac(struct net_device *dev, int queue, u8 *mac)
{
struct mlx4_en_priv *en_priv = netdev_priv(dev);
struct mlx4_en_dev *mdev = en_priv->mdev;
- u64 mac_u64 = mlx4_en_mac_to_u64(mac);
+ u64 mac_u64 = mlx4_mac_to_u64(mac);
if (!is_valid_ether_addr(mac))
return -EINVAL;
@@ -2341,7 +2329,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
netif_set_real_num_rx_queues(dev, prof->rx_ring_num);
SET_NETDEV_DEV(dev, &mdev->dev->pdev->dev);
- dev->dev_id = port - 1;
+ dev->dev_port = port - 1;
/*
* Initialize driver private data
@@ -2407,7 +2395,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
if (mlx4_is_slave(priv->mdev->dev)) {
eth_hw_addr_random(dev);
en_warn(priv, "Assigned random MAC address %pM\n", dev->dev_addr);
- mac_u64 = mlx4_en_mac_to_u64(dev->dev_addr);
+ mac_u64 = mlx4_mac_to_u64(dev->dev_addr);
mdev->dev->caps.def_mac[priv->port] = mac_u64;
} else {
		en_err(priv, "Port: %d, invalid mac burned: %pM, quitting\n",
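The en_netdev.c hunks above drop the driver-local mlx4_en_mac_to_u64() in favor of the shared mlx4_mac_to_u64() helper used elsewhere in the mlx4 stack. For reference, the conversion both helpers perform is the byte-wise fold visible in the removed code, sketched here with a hypothetical name:

#include <linux/if_ether.h>

static u64 example_mac_to_u64(const u8 addr[ETH_ALEN])
{
	u64 mac = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++) {
		mac <<= 8;		/* most significant byte first */
		mac |= addr[i];
	}
	return mac;
}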
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index dae1a1f..c2cfb05 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -148,10 +148,16 @@ int mlx4_en_DUMP_ETH_STATS(struct mlx4_en_dev *mdev, u8 port, u8 reset)
stats->tx_packets = 0;
stats->tx_bytes = 0;
priv->port_stats.tx_chksum_offload = 0;
+ priv->port_stats.queue_stopped = 0;
+ priv->port_stats.wake_queue = 0;
+
for (i = 0; i < priv->tx_ring_num; i++) {
stats->tx_packets += priv->tx_ring[i]->packets;
stats->tx_bytes += priv->tx_ring[i]->bytes;
priv->port_stats.tx_chksum_offload += priv->tx_ring[i]->tx_csum;
+ priv->port_stats.queue_stopped +=
+ priv->tx_ring[i]->queue_stopped;
+ priv->port_stats.wake_queue += priv->tx_ring[i]->wake_queue;
}
stats->rx_errors = be64_to_cpu(mlx4_en_stats->PCS) +
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 890922c..8afb72ec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -318,6 +318,31 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
}
}
+void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
+{
+ int i;
+ int num_of_eqs;
+ int num_rx_rings;
+ struct mlx4_dev *dev = mdev->dev;
+
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) {
+ if (!dev->caps.comp_pool)
+ num_of_eqs = max_t(int, MIN_RX_RINGS,
+ min_t(int,
+ dev->caps.num_comp_vectors,
+ DEF_RX_RINGS));
+ else
+ num_of_eqs = min_t(int, MAX_MSIX_P_PORT,
+ dev->caps.comp_pool/
+ dev->caps.num_ports) - 1;
+
+ num_rx_rings = min_t(int, num_of_eqs,
+ netif_get_num_default_rss_queues());
+ mdev->profile.prof[i].rx_ring_num =
+ rounddown_pow_of_two(num_rx_rings);
+ }
+}
+
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride, int node)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
index c11d063..03e5f6a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_selftest.c
@@ -129,8 +129,10 @@ static int mlx4_en_test_speed(struct mlx4_en_priv *priv)
if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
return -ENOMEM;
- /* The device currently only supports 10G speed */
- if (priv->port_state.link_speed != SPEED_10000)
+ /* The device supports 1G, 10G and 40G speeds */
+ if (priv->port_state.link_speed != 1000 &&
+ priv->port_state.link_speed != 10000 &&
+ priv->port_state.link_speed != 40000)
return priv->port_state.link_speed;
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1345703..69c2fce 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -44,16 +44,6 @@
#include "mlx4_en.h"
-enum {
- MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
- MAX_BF = 256,
-};
-
-static int inline_thold __read_mostly = MAX_INLINE;
-
-module_param_named(inline_thold, inline_thold, int, 0444);
-MODULE_PARM_DESC(inline_thold, "threshold for using inline data");
-
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring **pring, int qpn, u32 size,
u16 stride, int node, int queue_index)
@@ -75,8 +65,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->size = size;
ring->size_mask = size - 1;
ring->stride = stride;
-
- inline_thold = min(inline_thold, MAX_INLINE);
+ ring->inline_thold = priv->prof->inline_thold;
tmp = size * sizeof(struct mlx4_en_tx_info);
ring->tx_info = vmalloc_node(tmp, node);
@@ -456,7 +445,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
*/
if (netif_tx_queue_stopped(ring->tx_queue) && txbbs_skipped > 0) {
netif_tx_wake_queue(ring->tx_queue);
- priv->port_stats.wake_queue++;
+ ring->wake_queue++;
}
return done;
}
@@ -520,7 +509,7 @@ static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
return ring->buf + index * TXBB_SIZE;
}
-static int is_inline(struct sk_buff *skb, void **pfrag)
+static int is_inline(int inline_thold, struct sk_buff *skb, void **pfrag)
{
void *ptr;
@@ -580,7 +569,7 @@ static int get_real_size(struct sk_buff *skb, struct net_device *dev,
}
} else {
*lso_header_size = 0;
- if (!is_inline(skb, NULL))
+ if (!is_inline(priv->prof->inline_thold, skb, NULL))
real_size = CTRL_SIZE + (skb_shinfo(skb)->nr_frags + 1) * DS_SIZE;
else
real_size = inline_size(skb);
@@ -596,7 +585,13 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
int spc = MLX4_INLINE_ALIGN - CTRL_SIZE - sizeof *inl;
if (skb->len <= spc) {
- inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+ if (likely(skb->len >= MIN_PKT_LEN)) {
+ inl->byte_count = cpu_to_be32(1 << 31 | skb->len);
+ } else {
+ inl->byte_count = cpu_to_be32(1 << 31 | MIN_PKT_LEN);
+ memset(((void *)(inl + 1)) + skb->len, 0,
+ MIN_PKT_LEN - skb->len);
+ }
skb_copy_from_linear_data(skb, inl + 1, skb_headlen(skb));
if (skb_shinfo(skb)->nr_frags)
memcpy(((void *)(inl + 1)) + skb_headlen(skb), fragptr,
@@ -696,7 +691,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
ring->size - HEADROOM - MAX_DESC_TXBBS)) {
/* every full Tx ring stops queue */
netif_tx_stop_queue(ring->tx_queue);
- priv->port_stats.queue_stopped++;
+ ring->queue_stopped++;
/* If queue was emptied after the if, and before the
* stop_queue - need to wake the queue, or else it will remain
@@ -709,7 +704,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
if (unlikely(((int)(ring->prod - ring->cons)) <=
ring->size - HEADROOM - MAX_DESC_TXBBS)) {
netif_tx_wake_queue(ring->tx_queue);
- priv->port_stats.wake_queue++;
+ ring->wake_queue++;
} else {
return NETDEV_TX_BUSY;
}
@@ -747,11 +742,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_info->data_offset = (void *)data - (void *)tx_desc;
tx_info->linear = (lso_header_size < skb_headlen(skb) &&
- !is_inline(skb, NULL)) ? 1 : 0;
+ !is_inline(ring->inline_thold, skb, NULL)) ? 1 : 0;
data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
- if (is_inline(skb, &fragptr)) {
+ if (is_inline(ring->inline_thold, skb, &fragptr)) {
tx_info->inl = 1;
} else {
/* Map fragments */
@@ -881,7 +876,8 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
skb_tx_timestamp(skb);
if (ring->bf_enabled && desc_size <= MAX_BF && !bounce && !vlan_tx_tag_present(skb)) {
- *(__be32 *) (&tx_desc->ctrl.vlan_tag) |= cpu_to_be32(ring->doorbell_qpn);
+ tx_desc->ctrl.bf_qpn |= cpu_to_be32(ring->doorbell_qpn);
+
op_own |= htonl((bf_index & 0xffff) << 8);
/* Ensure new descriptor hits memory
* before setting ownership of this descriptor to HW */
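
The build_inline_wqe() change above pads inline payloads shorter than MIN_PKT_LEN (17 bytes) and advertises the padded length in byte_count. A minimal sketch of that padding step, assuming a generic inline segment made of a 32-bit big-endian byte count followed by the payload (names are hypothetical):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define MIN_INLINE_PKT_LEN 17	/* assumed stand-in for MIN_PKT_LEN */

struct inline_seg {
	__be32 byte_count;	/* bit 31 set marks an inline segment */
	u8 data[];
};

static void fill_inline_seg(struct inline_seg *inl, const void *buf, u32 len)
{
	u32 padded = max_t(u32, len, MIN_INLINE_PKT_LEN);

	inl->byte_count = cpu_to_be32(1U << 31 | padded);
	memcpy(inl->data, buf, len);
	/* Zero the tail so short frames are padded up to the minimum. */
	if (padded > len)
		memset(inl->data + len, 0, padded - len);
}
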
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 91b69ff..9cdf452 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -1890,7 +1890,8 @@ void mlx4_opreq_action(struct work_struct *work)
err = EINVAL;
break;
}
- err = mlx4_cmd(dev, 0, ((u32) err | cpu_to_be32(token) << 16),
+ err = mlx4_cmd(dev, 0, ((u32) err |
+ (__force u32)cpu_to_be32(token) << 16),
1, MLX4_CMD_GET_OP_REQ, MLX4_CMD_TIME_CLASS_A,
MLX4_CMD_NATIVE);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d711158..979ea43 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -41,7 +41,6 @@
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/delay.h>
-#include <linux/netdevice.h>
#include <linux/kmod.h>
#include <linux/mlx4/device.h>
@@ -1974,9 +1973,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
struct mlx4_priv *priv = mlx4_priv(dev);
struct msix_entry *entries;
int nreq = min_t(int, dev->caps.num_ports *
- min_t(int, netif_get_num_default_rss_queues() + 1,
+ min_t(int, num_online_cpus() + 1,
MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX);
- int err;
int i;
if (msi_x) {
@@ -1990,23 +1988,13 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev)
for (i = 0; i < nreq; ++i)
entries[i].entry = i;
- retry:
- err = pci_enable_msix(dev->pdev, entries, nreq);
- if (err) {
- /* Try again if at least 2 vectors are available */
- if (err > 1) {
- mlx4_info(dev, "Requested %d vectors, "
- "but only %d MSI-X vectors available, "
- "trying again\n", nreq, err);
- nreq = err;
- goto retry;
- }
+ nreq = pci_enable_msix_range(dev->pdev, entries, 2, nreq);
+
+ if (nreq < 0) {
kfree(entries);
goto no_msi;
- }
-
- if (nreq <
- MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
+ } else if (nreq < MSIX_LEGACY_SZ +
+ dev->caps.num_ports * MIN_MSIX_P_PORT) {
/*Working in legacy mode , all EQ's shared*/
dev->caps.comp_pool = 0;
dev->caps.num_comp_vectors = nreq - 1;
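
Several hunks in this series (mlx4, mlx5, myri10ge, s2io, vxge, netxen, qlcnic, qlge, forcedeth, sfc) replace the old pci_enable_msix() retry loop with pci_enable_msix_range(), which takes explicit minimum and maximum vector counts and returns either the allocated count or a negative errno. A minimal conversion sketch, with request_vectors() as a hypothetical wrapper:

#include <linux/pci.h>

/* Hypothetical wrapper: ask for up to @maxvec MSI-X vectors but accept as
 * few as @minvec. */
static int request_vectors(struct pci_dev *pdev, struct msix_entry *entries,
			   int minvec, int maxvec)
{
	int i, nvec;

	for (i = 0; i < maxvec; i++)
		entries[i].entry = i;

	/* Returns the vector count actually granted (>= minvec), or a
	 * negative errno; it never returns a positive "retry with fewer"
	 * hint the way pci_enable_msix() did. */
	nvec = pci_enable_msix_range(pdev, entries, minvec, maxvec);
	if (nvec < 0)
		return nvec;	/* caller falls back to MSI or legacy IRQ */

	return nvec;		/* MSI-X is now enabled with nvec vectors */
}
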
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index b57e8c8..69e1f36 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -187,6 +187,13 @@ enum {
#define GET_AVG_PERF_COUNTER(cnt) (0)
#endif /* MLX4_EN_PERF_STAT */
+/* Constants for TX flow */
+enum {
+ MAX_INLINE = 104, /* 128 - 16 - 4 - 4 */
+ MAX_BF = 256,
+ MIN_PKT_LEN = 17,
+};
+
/*
* Configurables
*/
@@ -267,10 +274,13 @@ struct mlx4_en_tx_ring {
unsigned long bytes;
unsigned long packets;
unsigned long tx_csum;
+ unsigned long queue_stopped;
+ unsigned long wake_queue;
struct mlx4_bf bf;
bool bf_enabled;
struct netdev_queue *tx_queue;
int hwtstamp_tx_type;
+ int inline_thold;
};
struct mlx4_en_rx_desc {
@@ -346,6 +356,7 @@ struct mlx4_en_port_profile {
u8 tx_pause;
u8 tx_ppp;
int rss_rings;
+ int inline_thold;
};
struct mlx4_en_profile {
@@ -737,7 +748,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
int cq, int user_prio);
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring);
-
+void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride, int node);
@@ -786,7 +797,6 @@ void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv);
#define MLX4_EN_NUM_SELF_TEST 5
void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf);
-u64 mlx4_en_mac_to_u64(u8 *addr);
void mlx4_en_ptp_overflow_check(struct mlx4_en_dev *mdev);
/*
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 23b7e2d..6f7c866 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -116,7 +116,6 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
struct mlx5_eq_table *table = &dev->priv.eq_table;
int num_eqs = 1 << dev->caps.log_max_eq;
int nvec;
- int err;
int i;
nvec = dev->caps.num_ports * num_online_cpus() + MLX5_EQ_VEC_COMP_BASE;
@@ -131,17 +130,12 @@ static int mlx5_enable_msix(struct mlx5_core_dev *dev)
for (i = 0; i < nvec; i++)
table->msix_arr[i].entry = i;
-retry:
- table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
- err = pci_enable_msix(dev->pdev, table->msix_arr, nvec);
- if (err <= 0) {
- return err;
- } else if (err > 2) {
- nvec = err;
- goto retry;
- }
+ nvec = pci_enable_msix_range(dev->pdev, table->msix_arr,
+ MLX5_EQ_VEC_COMP_BASE, nvec);
+ if (nvec < 0)
+ return nvec;
- mlx5_core_dbg(dev, "received %d MSI vectors out of %d requested\n", err, nvec);
+ table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;
return 0;
}
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 68026f7..130f6b2 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -2329,16 +2329,14 @@ static int myri10ge_request_irq(struct myri10ge_priv *mgp)
status = 0;
if (myri10ge_msi) {
if (mgp->num_slices > 1) {
- status =
- pci_enable_msix(pdev, mgp->msix_vectors,
- mgp->num_slices);
- if (status == 0) {
- mgp->msix_enabled = 1;
- } else {
+ status = pci_enable_msix_range(pdev, mgp->msix_vectors,
+ mgp->num_slices, mgp->num_slices);
+ if (status < 0) {
dev_err(&pdev->dev,
"Error %d setting up MSI-X\n", status);
return status;
}
+ mgp->msix_enabled = 1;
}
if (mgp->msix_enabled == 0) {
status = pci_enable_msi(pdev);
@@ -3895,32 +3893,34 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp)
mgp->msix_vectors = kcalloc(mgp->num_slices, sizeof(*mgp->msix_vectors),
GFP_KERNEL);
if (mgp->msix_vectors == NULL)
- goto disable_msix;
+ goto no_msix;
for (i = 0; i < mgp->num_slices; i++) {
mgp->msix_vectors[i].entry = i;
}
while (mgp->num_slices > 1) {
- /* make sure it is a power of two */
- while (!is_power_of_2(mgp->num_slices))
- mgp->num_slices--;
+ mgp->num_slices = rounddown_pow_of_two(mgp->num_slices);
if (mgp->num_slices == 1)
- goto disable_msix;
- status = pci_enable_msix(pdev, mgp->msix_vectors,
- mgp->num_slices);
- if (status == 0) {
- pci_disable_msix(pdev);
+ goto no_msix;
+ status = pci_enable_msix_range(pdev,
+ mgp->msix_vectors,
+ mgp->num_slices,
+ mgp->num_slices);
+ if (status < 0)
+ goto no_msix;
+
+ pci_disable_msix(pdev);
+
+ if (status == mgp->num_slices) {
if (old_allocated)
kfree(old_fw);
return;
- }
- if (status > 0)
+ } else {
mgp->num_slices = status;
- else
- goto disable_msix;
+ }
}
-disable_msix:
+no_msix:
if (mgp->msix_vectors != NULL) {
kfree(mgp->msix_vectors);
mgp->msix_vectors = NULL;
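
myri10ge_probe_slices() above only probes how many vectors the platform will grant: it rounds the slice count down to a power of two, requests exactly that many vectors (min == max), and disables MSI-X again on success because the real enable happens later in myri10ge_request_irq(). A hedged sketch of that probe-then-release idiom (probe_vectors() is a hypothetical name):

#include <linux/pci.h>
#include <linux/log2.h>

/* Hypothetical probe: check whether exactly @want MSI-X vectors can be
 * granted, without leaving MSI-X enabled.  Returns 0 on success or a
 * negative errno. */
static int probe_vectors(struct pci_dev *pdev, struct msix_entry *entries,
			 int want)
{
	int granted, i;

	want = rounddown_pow_of_two(want);
	for (i = 0; i < want; i++)
		entries[i].entry = i;

	granted = pci_enable_msix_range(pdev, entries, want, want);
	if (granted < 0)
		return granted;

	pci_disable_msix(pdev);	/* capability probe only */
	return 0;
}
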
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 9eeddbd..56e3a9d 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -3792,9 +3792,10 @@ static int s2io_enable_msi_x(struct s2io_nic *nic)
writeq(rx_mat, &bar0->rx_mat);
readq(&bar0->rx_mat);
- ret = pci_enable_msix(nic->pdev, nic->entries, nic->num_entries);
+ ret = pci_enable_msix_range(nic->pdev, nic->entries,
+ nic->num_entries, nic->num_entries);
/* We fail init if error or we get less vectors than min required */
- if (ret) {
+ if (ret < 0) {
DBG_PRINT(ERR_DBG, "Enabling MSI-X failed\n");
kfree(nic->entries);
swstats->mem_freed += nic->num_entries *
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index e46e869..c83cedd 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -2349,12 +2349,18 @@ start:
vdev->vxge_entries[j].entry = VXGE_ALARM_MSIX_ID;
vdev->vxge_entries[j].in_use = 0;
- ret = pci_enable_msix(vdev->pdev, vdev->entries, vdev->intr_cnt);
- if (ret > 0) {
+ ret = pci_enable_msix_range(vdev->pdev,
+ vdev->entries, 3, vdev->intr_cnt);
+ if (ret < 0) {
+ ret = -ENODEV;
+ goto enable_msix_failed;
+ } else if (ret < vdev->intr_cnt) {
+ pci_disable_msix(vdev->pdev);
+
vxge_debug_init(VXGE_ERR,
"%s: MSI-X enable failed for %d vectors, ret: %d",
VXGE_DRIVER_NAME, vdev->intr_cnt, ret);
- if ((max_config_vpath != VXGE_USE_DEFAULT) || (ret < 3)) {
+ if (max_config_vpath != VXGE_USE_DEFAULT) {
ret = -ENODEV;
goto enable_msix_failed;
}
@@ -2368,9 +2374,6 @@ start:
vxge_close_vpaths(vdev, temp);
vdev->no_of_vpath = temp;
goto start;
- } else if (ret < 0) {
- ret = -ENODEV;
- goto enable_msix_failed;
}
return 0;
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 70cf97f..bad3c05 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3930,7 +3930,7 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
{
struct fe_priv *np = get_nvpriv(dev);
u8 __iomem *base = get_hwbase(dev);
- int ret = 1;
+ int ret;
int i;
irqreturn_t (*handler)(int foo, void *data);
@@ -3946,14 +3946,18 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
if (np->msi_flags & NV_MSI_X_CAPABLE) {
for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
np->msi_x_entry[i].entry = i;
- ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
- if (ret == 0) {
+ ret = pci_enable_msix_range(np->pci_dev,
+ np->msi_x_entry,
+ np->msi_flags & NV_MSI_X_VECTORS_MASK,
+ np->msi_flags & NV_MSI_X_VECTORS_MASK);
+ if (ret > 0) {
np->msi_flags |= NV_MSI_X_ENABLED;
if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
/* Request irq for rx handling */
sprintf(np->name_rx, "%s-rx", dev->name);
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
- nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
+ nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed for rx %d\n",
ret);
@@ -3963,8 +3967,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
}
/* Request irq for tx handling */
sprintf(np->name_tx, "%s-tx", dev->name);
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
- nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
+ nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed for tx %d\n",
ret);
@@ -3974,8 +3979,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
}
/* Request irq for link and timer handling */
sprintf(np->name_other, "%s-other", dev->name);
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
- nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
+ nv_nic_irq_other, IRQF_SHARED, np->name_other, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed for link %d\n",
ret);
@@ -3991,7 +3997,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
set_msix_vector_map(dev, NV_MSI_X_VECTOR_OTHER, NVREG_IRQ_OTHER);
} else {
/* Request irq for all interrupts */
- if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
+ ret = request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector,
+ handler, IRQF_SHARED, dev->name, dev);
+ if (ret) {
netdev_info(dev,
"request_irq failed %d\n",
ret);
@@ -4005,13 +4013,15 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
writel(0, base + NvRegMSIXMap1);
}
netdev_info(dev, "MSI-X enabled\n");
+ return 0;
}
}
- if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
+ if (np->msi_flags & NV_MSI_CAPABLE) {
ret = pci_enable_msi(np->pci_dev);
if (ret == 0) {
np->msi_flags |= NV_MSI_ENABLED;
- if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
+ ret = request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev);
+ if (ret) {
netdev_info(dev, "request_irq failed %d\n",
ret);
pci_disable_msi(np->pci_dev);
@@ -4025,13 +4035,12 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
/* enable msi vector 0 */
writel(NVREG_MSI_VECTOR_0_ENABLED, base + NvRegMSIIrqMask);
netdev_info(dev, "MSI enabled\n");
+ return 0;
}
}
- if (ret != 0) {
- if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
- goto out_err;
- }
+ if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0)
+ goto out_err;
return 0;
out_free_tx:
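
The forcedeth changes above keep the request_irq() return code in ret so the failure message prints the real errno instead of a value left over from an earlier call. A short sketch of that pattern (setup_rx_irq() and the names are hypothetical):

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical helper: request one MSI-X vector and report the actual
 * errno on failure. */
static int setup_rx_irq(struct net_device *dev, unsigned int vector,
			irq_handler_t handler, const char *name)
{
	int ret;

	ret = request_irq(vector, handler, IRQF_SHARED, name, dev);
	if (ret)
		netdev_info(dev, "request_irq failed for %s: %d\n", name, ret);

	return ret;
}
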
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 70849de..f09c35d 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -643,8 +643,9 @@ static int netxen_setup_msi_interrupts(struct netxen_adapter *adapter,
if (adapter->msix_supported) {
netxen_init_msix_entries(adapter, num_msix);
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err == 0) {
+ err = pci_enable_msix_range(pdev, adapter->msix_entries,
+ num_msix, num_msix);
+ if (err > 0) {
adapter->flags |= NETXEN_NIC_MSIX_ENABLED;
netxen_set_msix_bit(pdev, 1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index f19f81c..df9daa3 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -38,8 +38,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 55
-#define QLCNIC_LINUX_VERSIONID "5.3.55"
+#define _QLCNIC_LINUX_SUBVERSION 56
+#define QLCNIC_LINUX_VERSIONID "5.3.56"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -394,7 +394,7 @@ struct qlcnic_nic_intr_coalesce {
u32 timer_out;
};
-struct qlcnic_dump_template_hdr {
+struct qlcnic_83xx_dump_template_hdr {
u32 type;
u32 offset;
u32 size;
@@ -411,15 +411,42 @@ struct qlcnic_dump_template_hdr {
u32 rsvd[0];
};
+struct qlcnic_82xx_dump_template_hdr {
+ u32 type;
+ u32 offset;
+ u32 size;
+ u32 cap_mask;
+ u32 num_entries;
+ u32 version;
+ u32 timestamp;
+ u32 checksum;
+ u32 drv_cap_mask;
+ u32 sys_info[3];
+ u32 saved_state[16];
+ u32 cap_sizes[8];
+ u32 rsvd[7];
+ u32 capabilities;
+ u32 rsvd1[0];
+};
+
struct qlcnic_fw_dump {
u8 clr; /* flag to indicate if dump is cleared */
bool enable; /* enable/disable dump */
u32 size; /* total size of the dump */
+ u32 cap_mask; /* Current capture mask */
void *data; /* dump data area */
- struct qlcnic_dump_template_hdr *tmpl_hdr;
+ void *tmpl_hdr;
dma_addr_t phys_addr;
void *dma_buffer;
bool use_pex_dma;
+ /* Read only elements which are common between 82xx and 83xx
+ * template header. Update these values immediately after we read
+ * template header from Firmware
+ */
+ u32 tmpl_hdr_size;
+ u32 version;
+ u32 num_entries;
+ u32 offset;
};
/*
@@ -1769,6 +1796,12 @@ struct qlcnic_hardware_ops {
struct qlcnic_host_tx_ring *);
void (*disable_tx_intr) (struct qlcnic_adapter *,
struct qlcnic_host_tx_ring *);
+ u32 (*get_saved_state)(void *, u32);
+ void (*set_saved_state)(void *, u32, u32);
+ void (*cache_tmpl_hdr_values)(struct qlcnic_fw_dump *);
+ u32 (*get_cap_size)(void *, int);
+ void (*set_sys_info)(void *, int, u32);
+ void (*store_cap_mask)(void *, u32);
};
extern struct qlcnic_nic_template qlcnic_vf_ops;
@@ -2007,6 +2040,42 @@ static inline void qlcnic_read_phys_port_id(struct qlcnic_adapter *adapter)
adapter->ahw->hw_ops->read_phys_port_id(adapter);
}
+static inline u32 qlcnic_get_saved_state(struct qlcnic_adapter *adapter,
+ void *t_hdr, u32 index)
+{
+ return adapter->ahw->hw_ops->get_saved_state(t_hdr, index);
+}
+
+static inline void qlcnic_set_saved_state(struct qlcnic_adapter *adapter,
+ void *t_hdr, u32 index, u32 value)
+{
+ adapter->ahw->hw_ops->set_saved_state(t_hdr, index, value);
+}
+
+static inline void qlcnic_cache_tmpl_hdr_values(struct qlcnic_adapter *adapter,
+ struct qlcnic_fw_dump *fw_dump)
+{
+ adapter->ahw->hw_ops->cache_tmpl_hdr_values(fw_dump);
+}
+
+static inline u32 qlcnic_get_cap_size(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, int index)
+{
+ return adapter->ahw->hw_ops->get_cap_size(tmpl_hdr, index);
+}
+
+static inline void qlcnic_set_sys_info(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, int idx, u32 value)
+{
+ adapter->ahw->hw_ops->set_sys_info(tmpl_hdr, idx, value);
+}
+
+static inline void qlcnic_store_cap_mask(struct qlcnic_adapter *adapter,
+ void *tmpl_hdr, u32 mask)
+{
+ adapter->ahw->hw_ops->store_cap_mask(tmpl_hdr, mask);
+}
+
static inline void qlcnic_dev_request_reset(struct qlcnic_adapter *adapter,
u32 key)
{
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 27c4f13..3b83fbd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -203,7 +203,12 @@ static struct qlcnic_hardware_ops qlcnic_83xx_hw_ops = {
.disable_sds_intr = qlcnic_83xx_disable_sds_intr,
.enable_tx_intr = qlcnic_83xx_enable_tx_intr,
.disable_tx_intr = qlcnic_83xx_disable_tx_intr,
-
+ .get_saved_state = qlcnic_83xx_get_saved_state,
+ .set_saved_state = qlcnic_83xx_set_saved_state,
+ .cache_tmpl_hdr_values = qlcnic_83xx_cache_tmpl_hdr_values,
+ .get_cap_size = qlcnic_83xx_get_cap_size,
+ .set_sys_info = qlcnic_83xx_set_sys_info,
+ .store_cap_mask = qlcnic_83xx_store_cap_mask,
};
static struct qlcnic_nic_template qlcnic_83xx_ops = {
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index f92485c..81c1889 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -308,6 +308,8 @@ struct qlc_83xx_reset {
#define QLC_83XX_IDC_FLASH_PARAM_ADDR 0x3e8020
struct qlcnic_adapter;
+struct qlcnic_fw_dump;
+
struct qlc_83xx_idc {
int (*state_entry) (struct qlcnic_adapter *);
u64 sec_counter;
@@ -650,4 +652,10 @@ int qlcnic_83xx_check_vnic_state(struct qlcnic_adapter *);
void qlcnic_83xx_aer_stop_poll_work(struct qlcnic_adapter *);
int qlcnic_83xx_aer_reset(struct qlcnic_adapter *);
void qlcnic_83xx_aer_start_poll_work(struct qlcnic_adapter *);
+u32 qlcnic_83xx_get_saved_state(void *, u32);
+void qlcnic_83xx_set_saved_state(void *, u32, u32);
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_83xx_get_cap_size(void *, int);
+void qlcnic_83xx_set_sys_info(void *, int, u32);
+void qlcnic_83xx_store_cap_mask(void *, u32);
#endif
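
The qlcnic changes above split qlcnic_dump_template_hdr into separate 82xx and 83xx layouts and reach the fields through new hw_ops callbacks (get_saved_state, set_saved_state, get_cap_size, set_sys_info, store_cap_mask, cache_tmpl_hdr_values), so the minidump core never dereferences a chip-specific header directly. A minimal sketch of that ops-table indirection over two header layouts, with hypothetical hdr_a/hdr_b structures standing in for the real templates:

#include <linux/types.h>

struct hdr_a { u32 saved_state[16]; u32 cap_sizes[8]; };
struct hdr_b { u32 reserved[4]; u32 saved_state[16]; u32 cap_sizes[8]; };

struct hdr_ops {
	u32 (*get_saved_state)(void *hdr, u32 index);
	u32 (*get_cap_size)(void *hdr, int index);
};

static u32 hdr_a_get_saved_state(void *hdr, u32 index)
{
	return ((struct hdr_a *)hdr)->saved_state[index];
}

static u32 hdr_a_get_cap_size(void *hdr, int index)
{
	return ((struct hdr_a *)hdr)->cap_sizes[index];
}

static const struct hdr_ops hdr_a_ops = {
	.get_saved_state = hdr_a_get_saved_state,
	.get_cap_size	 = hdr_a_get_cap_size,
};

/* Callers keep an opaque void *tmpl_hdr plus an ops pointer, so the common
 * dump code stays layout-agnostic. */
static u32 read_saved_state(const struct hdr_ops *ops, void *hdr, u32 index)
{
	return ops->get_saved_state(hdr, index);
}
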
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index acee1a5..1960609 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1639,14 +1639,14 @@ qlcnic_get_dump_flag(struct net_device *netdev, struct ethtool_dump *dump)
}
if (fw_dump->clr)
- dump->len = fw_dump->tmpl_hdr->size + fw_dump->size;
+ dump->len = fw_dump->tmpl_hdr_size + fw_dump->size;
else
dump->len = 0;
if (!qlcnic_check_fw_dump_state(adapter))
dump->flag = ETH_FW_DUMP_DISABLE;
else
- dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+ dump->flag = fw_dump->cap_mask;
dump->version = adapter->fw_version;
return 0;
@@ -1671,9 +1671,10 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
netdev_info(netdev, "Dump not available\n");
return -EINVAL;
}
+
/* Copy template header first */
- copy_sz = fw_dump->tmpl_hdr->size;
- hdr_ptr = (u32 *) fw_dump->tmpl_hdr;
+ copy_sz = fw_dump->tmpl_hdr_size;
+ hdr_ptr = (u32 *)fw_dump->tmpl_hdr;
data = buffer;
for (i = 0; i < copy_sz/sizeof(u32); i++)
*data++ = cpu_to_le32(*hdr_ptr++);
@@ -1681,7 +1682,7 @@ qlcnic_get_dump_data(struct net_device *netdev, struct ethtool_dump *dump,
/* Copy captured dump data */
memcpy(buffer + copy_sz, fw_dump->data, fw_dump->size);
dump->len = copy_sz + fw_dump->size;
- dump->flag = fw_dump->tmpl_hdr->drv_cap_mask;
+ dump->flag = fw_dump->cap_mask;
/* Free dump area once data has been captured */
vfree(fw_dump->data);
@@ -1703,7 +1704,11 @@ static int qlcnic_set_dump_mask(struct qlcnic_adapter *adapter, u32 mask)
return -EOPNOTSUPP;
}
- fw_dump->tmpl_hdr->drv_cap_mask = mask;
+ fw_dump->cap_mask = mask;
+
+ /* Store new capture mask in template header as well*/
+ qlcnic_store_cap_mask(adapter, fw_dump->tmpl_hdr, mask);
+
netdev_info(netdev, "Driver mask changed to: 0x%x\n", mask);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
index 03d18a0..9f3adf4 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.c
@@ -317,9 +317,7 @@ static void qlcnic_write_window_reg(u32 addr, void __iomem *bar0, u32 data)
int
qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
{
- int timeout = 0;
- int err = 0;
- u32 done = 0;
+ int timeout = 0, err = 0, done = 0;
while (!done) {
done = QLCRD32(adapter, QLCNIC_PCIE_REG(PCIE_SEM_LOCK(sem)),
@@ -327,10 +325,20 @@ qlcnic_pcie_sem_lock(struct qlcnic_adapter *adapter, int sem, u32 id_reg)
if (done == 1)
break;
if (++timeout >= QLCNIC_PCIE_SEM_TIMEOUT) {
- dev_err(&adapter->pdev->dev,
- "Failed to acquire sem=%d lock; holdby=%d\n",
- sem,
- id_reg ? QLCRD32(adapter, id_reg, &err) : -1);
+ if (id_reg) {
+ done = QLCRD32(adapter, id_reg, &err);
+ if (done != -1)
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock held by=%d\n",
+ sem, done);
+ else
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock",
+ sem);
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Failed to acquire sem=%d lock", sem);
+ }
return -EIO;
}
msleep(1);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 63d7561..576b301 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -161,6 +161,7 @@ struct qlcnic_host_sds_ring;
struct qlcnic_host_tx_ring;
struct qlcnic_hardware_context;
struct qlcnic_adapter;
+struct qlcnic_fw_dump;
int qlcnic_82xx_hw_read_wx_2M(struct qlcnic_adapter *adapter, ulong, int *);
int qlcnic_82xx_hw_write_wx_2M(struct qlcnic_adapter *, ulong, u32);
@@ -213,4 +214,11 @@ int qlcnic_82xx_shutdown(struct pci_dev *);
int qlcnic_82xx_resume(struct qlcnic_adapter *);
void qlcnic_clr_all_drv_state(struct qlcnic_adapter *adapter, u8 failed);
void qlcnic_fw_poll_work(struct work_struct *work);
+
+u32 qlcnic_82xx_get_saved_state(void *, u32);
+void qlcnic_82xx_set_saved_state(void *, u32, u32);
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *);
+u32 qlcnic_82xx_get_cap_size(void *, int);
+void qlcnic_82xx_set_sys_info(void *, int, u32);
+void qlcnic_82xx_store_cap_mask(void *, u32);
#endif /* __QLCNIC_HW_H_ */
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1222865..4b92d9d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -90,7 +90,6 @@ static void qlcnic_82xx_io_resume(struct pci_dev *);
static void qlcnic_82xx_set_mac_filter_count(struct qlcnic_adapter *);
static pci_ers_result_t qlcnic_82xx_io_error_detected(struct pci_dev *,
pci_channel_state_t);
-
static u32 qlcnic_vlan_tx_check(struct qlcnic_adapter *adapter)
{
struct qlcnic_hardware_context *ahw = adapter->ahw;
@@ -561,6 +560,12 @@ static struct qlcnic_hardware_ops qlcnic_hw_ops = {
.disable_sds_intr = qlcnic_82xx_disable_sds_intr,
.enable_tx_intr = qlcnic_82xx_enable_tx_intr,
.disable_tx_intr = qlcnic_82xx_disable_tx_intr,
+ .get_saved_state = qlcnic_82xx_get_saved_state,
+ .set_saved_state = qlcnic_82xx_set_saved_state,
+ .cache_tmpl_hdr_values = qlcnic_82xx_cache_tmpl_hdr_values,
+ .get_cap_size = qlcnic_82xx_get_cap_size,
+ .set_sys_info = qlcnic_82xx_set_sys_info,
+ .store_cap_mask = qlcnic_82xx_store_cap_mask,
};
static int qlcnic_check_multi_tx_capability(struct qlcnic_adapter *adapter)
@@ -684,7 +689,7 @@ restore:
int qlcnic_enable_msix(struct qlcnic_adapter *adapter, u32 num_msix)
{
struct pci_dev *pdev = adapter->pdev;
- int err = -1, vector;
+ int err, vector;
if (!adapter->msix_entries) {
adapter->msix_entries = kcalloc(num_msix,
@@ -701,13 +706,17 @@ enable_msix:
for (vector = 0; vector < num_msix; vector++)
adapter->msix_entries[vector].entry = vector;
- err = pci_enable_msix(pdev, adapter->msix_entries, num_msix);
- if (err == 0) {
+ err = pci_enable_msix_range(pdev,
+ adapter->msix_entries, 1, num_msix);
+
+ if (err == num_msix) {
adapter->flags |= QLCNIC_MSIX_ENABLED;
adapter->ahw->num_msix = num_msix;
dev_info(&pdev->dev, "using msi-x interrupts\n");
- return err;
+ return 0;
} else if (err > 0) {
+ pci_disable_msix(pdev);
+
dev_info(&pdev->dev,
"Unable to allocate %d MSI-X vectors, Available vectors %d\n",
num_msix, err);
@@ -715,12 +724,12 @@ enable_msix:
if (qlcnic_82xx_check(adapter)) {
num_msix = rounddown_pow_of_two(err);
if (err < QLCNIC_82XX_MINIMUM_VECTOR)
- return -EIO;
+ return -ENOSPC;
} else {
num_msix = rounddown_pow_of_two(err - 1);
num_msix += 1;
if (err < QLCNIC_83XX_MINIMUM_VECTOR)
- return -EIO;
+ return -ENOSPC;
}
if (qlcnic_82xx_check(adapter) &&
@@ -747,7 +756,7 @@ enable_msix:
}
}
- return err;
+ return -EIO;
}
static int qlcnic_82xx_calculate_msix_vector(struct qlcnic_adapter *adapter)
@@ -2442,8 +2451,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err) {
switch (err) {
case -ENOTRECOVERABLE:
- dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware. Please reboot\n");
- dev_err(&pdev->dev, "If reboot doesn't help, please replace the adapter with new one and return the faulty adapter for repair\n");
+ dev_err(&pdev->dev, "Adapter initialization failed due to a faulty hardware\n");
+ dev_err(&pdev->dev, "Please replace the adapter with new one and return the faulty adapter for repair\n");
goto err_out_free_hw;
case -ENOMEM:
dev_err(&pdev->dev, "Adapter initialization failed. Please reboot\n");
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
index 7763962..37b979b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c
@@ -211,6 +211,107 @@ enum qlcnic_minidump_opcode {
QLCNIC_DUMP_RDEND = 255
};
+inline u32 qlcnic_82xx_get_saved_state(void *t_hdr, u32 index)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->saved_state[index];
+}
+
+inline void qlcnic_82xx_set_saved_state(void *t_hdr, u32 index,
+ u32 value)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->saved_state[index] = value;
+}
+
+void qlcnic_82xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ fw_dump->tmpl_hdr_size = hdr->size;
+ fw_dump->version = hdr->version;
+ fw_dump->num_entries = hdr->num_entries;
+ fw_dump->offset = hdr->offset;
+
+ hdr->drv_cap_mask = hdr->cap_mask;
+ fw_dump->cap_mask = hdr->cap_mask;
+}
+
+inline u32 qlcnic_82xx_get_cap_size(void *t_hdr, int index)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->cap_sizes[index];
+}
+
+void qlcnic_82xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->sys_info[idx] = value;
+}
+
+void qlcnic_82xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+ struct qlcnic_82xx_dump_template_hdr *hdr = tmpl_hdr;
+
+ hdr->drv_cap_mask = mask;
+}
+
+inline u32 qlcnic_83xx_get_saved_state(void *t_hdr, u32 index)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->saved_state[index];
+}
+
+inline void qlcnic_83xx_set_saved_state(void *t_hdr, u32 index,
+ u32 value)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->saved_state[index] = value;
+}
+
+void qlcnic_83xx_cache_tmpl_hdr_values(struct qlcnic_fw_dump *fw_dump)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = fw_dump->tmpl_hdr;
+ fw_dump->tmpl_hdr_size = hdr->size;
+ fw_dump->version = hdr->version;
+ fw_dump->num_entries = hdr->num_entries;
+ fw_dump->offset = hdr->offset;
+
+ hdr->drv_cap_mask = hdr->cap_mask;
+ fw_dump->cap_mask = hdr->cap_mask;
+}
+
+inline u32 qlcnic_83xx_get_cap_size(void *t_hdr, int index)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ return hdr->cap_sizes[index];
+}
+
+void qlcnic_83xx_set_sys_info(void *t_hdr, int idx, u32 value)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr = t_hdr;
+
+ hdr->sys_info[idx] = value;
+}
+
+void qlcnic_83xx_store_cap_mask(void *tmpl_hdr, u32 mask)
+{
+ struct qlcnic_83xx_dump_template_hdr *hdr;
+
+ hdr = tmpl_hdr;
+ hdr->drv_cap_mask = mask;
+}
+
struct qlcnic_dump_operations {
enum qlcnic_minidump_opcode opcode;
u32 (*handler)(struct qlcnic_adapter *, struct qlcnic_dump_entry *,
@@ -238,11 +339,11 @@ static u32 qlcnic_dump_crb(struct qlcnic_adapter *adapter,
static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
struct qlcnic_dump_entry *entry, __le32 *buffer)
{
+ void *hdr = adapter->ahw->fw_dump.tmpl_hdr;
+ struct __ctrl *ctr = &entry->region.ctrl;
int i, k, timeout = 0;
- u32 addr, data;
+ u32 addr, data, temp;
u8 no_ops;
- struct __ctrl *ctr = &entry->region.ctrl;
- struct qlcnic_dump_template_hdr *t_hdr = adapter->ahw->fw_dump.tmpl_hdr;
addr = ctr->addr;
no_ops = ctr->no_ops;
@@ -285,29 +386,42 @@ static u32 qlcnic_dump_ctrl(struct qlcnic_adapter *adapter,
}
break;
case QLCNIC_DUMP_RD_SAVE:
- if (ctr->index_a)
- addr = t_hdr->saved_state[ctr->index_a];
+ temp = ctr->index_a;
+ if (temp)
+ addr = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
data = qlcnic_ind_rd(adapter, addr);
- t_hdr->saved_state[ctr->index_v] = data;
+ qlcnic_set_saved_state(adapter, hdr,
+ ctr->index_v, data);
break;
case QLCNIC_DUMP_WRT_SAVED:
- if (ctr->index_v)
- data = t_hdr->saved_state[ctr->index_v];
+ temp = ctr->index_v;
+ if (temp)
+ data = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
else
data = ctr->val1;
- if (ctr->index_a)
- addr = t_hdr->saved_state[ctr->index_a];
+
+ temp = ctr->index_a;
+ if (temp)
+ addr = qlcnic_get_saved_state(adapter,
+ hdr,
+ temp);
qlcnic_ind_wr(adapter, addr, data);
break;
case QLCNIC_DUMP_MOD_SAVE_ST:
- data = t_hdr->saved_state[ctr->index_v];
+ data = qlcnic_get_saved_state(adapter, hdr,
+ ctr->index_v);
data <<= ctr->shl_val;
data >>= ctr->shr_val;
if (ctr->val2)
data &= ctr->val2;
data |= ctr->val3;
data += ctr->val1;
- t_hdr->saved_state[ctr->index_v] = data;
+ qlcnic_set_saved_state(adapter, hdr,
+ ctr->index_v, data);
break;
default:
dev_info(&adapter->pdev->dev,
@@ -544,7 +658,7 @@ out:
static int qlcnic_start_pex_dma(struct qlcnic_adapter *adapter,
struct __mem *mem)
{
- struct qlcnic_dump_template_hdr *tmpl_hdr;
+ struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
u32 dma_no, dma_base_addr, temp_addr;
int i, ret, dma_sts;
@@ -596,7 +710,7 @@ static u32 qlcnic_read_memory_pexdma(struct qlcnic_adapter *adapter,
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
u32 temp, dma_base_addr, size = 0, read_size = 0;
struct qlcnic_pex_dma_descriptor *dma_descr;
- struct qlcnic_dump_template_hdr *tmpl_hdr;
+ struct qlcnic_83xx_dump_template_hdr *tmpl_hdr;
struct device *dev = &adapter->pdev->dev;
dma_addr_t dma_phys_addr;
void *dma_buffer;
@@ -938,8 +1052,8 @@ static int
qlcnic_fw_flash_get_minidump_temp_size(struct qlcnic_adapter *adapter,
struct qlcnic_cmd_args *cmd)
{
- struct qlcnic_dump_template_hdr tmp_hdr;
- u32 size = sizeof(struct qlcnic_dump_template_hdr) / sizeof(u32);
+ struct qlcnic_83xx_dump_template_hdr tmp_hdr;
+ u32 size = sizeof(tmp_hdr) / sizeof(u32);
int ret = 0;
if (qlcnic_82xx_check(adapter))
@@ -1027,17 +1141,19 @@ free_mem:
return err;
}
+#define QLCNIC_TEMPLATE_VERSION (0x20001)
+
int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
{
- int err;
- u32 temp_size = 0;
- u32 version, csum, *tmp_buf;
struct qlcnic_hardware_context *ahw;
- struct qlcnic_dump_template_hdr *tmpl_hdr;
+ struct qlcnic_fw_dump *fw_dump;
+ u32 version, csum, *tmp_buf;
u8 use_flash_temp = 0;
+ u32 temp_size = 0;
+ int err;
ahw = adapter->ahw;
-
+ fw_dump = &ahw->fw_dump;
err = qlcnic_fw_get_minidump_temp_size(adapter, &version, &temp_size,
&use_flash_temp);
if (err) {
@@ -1046,11 +1162,11 @@ int qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter)
return -EIO;
}
- ahw->fw_dump.tmpl_hdr = vzalloc(temp_size);
- if (!ahw->fw_dump.tmpl_hdr)
+ fw_dump->tmpl_hdr = vzalloc(temp_size);
+ if (!fw_dump->tmpl_hdr)
return -ENOMEM;
- tmp_buf = (u32 *)ahw->fw_dump.tmpl_hdr;
+ tmp_buf = (u32 *)fw_dump->tmpl_hdr;
if (use_flash_temp)
goto flash_temp;
@@ -1065,8 +1181,8 @@ flash_temp:
dev_err(&adapter->pdev->dev,
"Failed to get minidump template header %d\n",
err);
- vfree(ahw->fw_dump.tmpl_hdr);
- ahw->fw_dump.tmpl_hdr = NULL;
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
return -EIO;
}
}
@@ -1076,21 +1192,22 @@ flash_temp:
if (csum) {
dev_err(&adapter->pdev->dev,
"Template header checksum validation failed\n");
- vfree(ahw->fw_dump.tmpl_hdr);
- ahw->fw_dump.tmpl_hdr = NULL;
+ vfree(fw_dump->tmpl_hdr);
+ fw_dump->tmpl_hdr = NULL;
return -EIO;
}
- tmpl_hdr = ahw->fw_dump.tmpl_hdr;
- tmpl_hdr->drv_cap_mask = tmpl_hdr->cap_mask;
+ qlcnic_cache_tmpl_hdr_values(adapter, fw_dump);
+
dev_info(&adapter->pdev->dev,
"Default minidump capture mask 0x%x\n",
- tmpl_hdr->cap_mask);
+ fw_dump->cap_mask);
- if ((tmpl_hdr->version & 0xfffff) >= 0x20001)
- ahw->fw_dump.use_pex_dma = true;
+ if (qlcnic_83xx_check(adapter) &&
+ (fw_dump->version & 0xfffff) >= QLCNIC_TEMPLATE_VERSION)
+ fw_dump->use_pex_dma = true;
else
- ahw->fw_dump.use_pex_dma = false;
+ fw_dump->use_pex_dma = false;
qlcnic_enable_fw_dump_state(adapter);
@@ -1099,21 +1216,22 @@ flash_temp:
int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
{
- __le32 *buffer;
- u32 ocm_window;
- char mesg[64];
- char *msg[] = {mesg, NULL};
- int i, k, ops_cnt, ops_index, dump_size = 0;
- u32 entry_offset, dump, no_entries, buf_offset = 0;
- struct qlcnic_dump_entry *entry;
struct qlcnic_fw_dump *fw_dump = &adapter->ahw->fw_dump;
- struct qlcnic_dump_template_hdr *tmpl_hdr = fw_dump->tmpl_hdr;
static const struct qlcnic_dump_operations *fw_dump_ops;
+ struct qlcnic_83xx_dump_template_hdr *hdr_83xx;
+ u32 entry_offset, dump, no_entries, buf_offset = 0;
+ int i, k, ops_cnt, ops_index, dump_size = 0;
struct device *dev = &adapter->pdev->dev;
struct qlcnic_hardware_context *ahw;
- void *temp_buffer;
+ struct qlcnic_dump_entry *entry;
+ void *temp_buffer, *tmpl_hdr;
+ u32 ocm_window;
+ __le32 *buffer;
+ char mesg[64];
+ char *msg[] = {mesg, NULL};
ahw = adapter->ahw;
+ tmpl_hdr = fw_dump->tmpl_hdr;
/* Return if we don't have firmware dump template header */
if (!tmpl_hdr)
@@ -1133,8 +1251,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
netif_info(adapter->ahw, drv, adapter->netdev, "Take FW dump\n");
/* Calculate the size for dump data area only */
for (i = 2, k = 1; (i & QLCNIC_DUMP_MASK_MAX); i <<= 1, k++)
- if (i & tmpl_hdr->drv_cap_mask)
- dump_size += tmpl_hdr->cap_sizes[k];
+ if (i & fw_dump->cap_mask)
+ dump_size += qlcnic_get_cap_size(adapter, tmpl_hdr, k);
+
if (!dump_size)
return -EIO;
@@ -1144,10 +1263,10 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
buffer = fw_dump->data;
fw_dump->size = dump_size;
- no_entries = tmpl_hdr->num_entries;
- entry_offset = tmpl_hdr->offset;
- tmpl_hdr->sys_info[0] = QLCNIC_DRIVER_VERSION;
- tmpl_hdr->sys_info[1] = adapter->fw_version;
+ no_entries = fw_dump->num_entries;
+ entry_offset = fw_dump->offset;
+ qlcnic_set_sys_info(adapter, tmpl_hdr, 0, QLCNIC_DRIVER_VERSION);
+ qlcnic_set_sys_info(adapter, tmpl_hdr, 1, adapter->fw_version);
if (fw_dump->use_pex_dma) {
temp_buffer = dma_alloc_coherent(dev, QLC_PEX_DMA_READ_SIZE,
@@ -1163,16 +1282,17 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
ops_cnt = ARRAY_SIZE(qlcnic_fw_dump_ops);
fw_dump_ops = qlcnic_fw_dump_ops;
} else {
+ hdr_83xx = tmpl_hdr;
ops_cnt = ARRAY_SIZE(qlcnic_83xx_fw_dump_ops);
fw_dump_ops = qlcnic_83xx_fw_dump_ops;
- ocm_window = tmpl_hdr->ocm_wnd_reg[adapter->ahw->pci_func];
- tmpl_hdr->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
- tmpl_hdr->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
+ ocm_window = hdr_83xx->ocm_wnd_reg[ahw->pci_func];
+ hdr_83xx->saved_state[QLC_83XX_OCM_INDEX] = ocm_window;
+ hdr_83xx->saved_state[QLC_83XX_PCI_INDEX] = ahw->pci_func;
}
for (i = 0; i < no_entries; i++) {
- entry = (void *)tmpl_hdr + entry_offset;
- if (!(entry->hdr.mask & tmpl_hdr->drv_cap_mask)) {
+ entry = tmpl_hdr + entry_offset;
+ if (!(entry->hdr.mask & fw_dump->cap_mask)) {
entry->hdr.flags |= QLCNIC_DUMP_SKIP;
entry_offset += entry->hdr.offset;
continue;
@@ -1209,8 +1329,9 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter)
fw_dump->clr = 1;
snprintf(mesg, sizeof(mesg), "FW_DUMP=%s", adapter->netdev->name);
- dev_info(dev, "%s: Dump data %d bytes captured, template header size %d bytes\n",
- adapter->netdev->name, fw_dump->size, tmpl_hdr->size);
+ netdev_info(adapter->netdev,
+ "Dump data %d bytes captured, template header size %d bytes\n",
+ fw_dump->size, fw_dump->tmpl_hdr_size);
/* Send a udev event to notify availability of FW dump */
kobject_uevent_env(&dev->kobj, KOBJ_CHANGE, msg);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index e5277a6..14f748c 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -15,6 +15,7 @@
#define QLC_MAC_OPCODE_MASK 0x7
#define QLC_VF_FLOOD_BIT BIT_16
#define QLC_FLOOD_MODE 0x5
+#define QLC_SRIOV_ALLOW_VLAN0 BIT_19
static int qlcnic_sriov_pf_get_vport_handle(struct qlcnic_adapter *, u8);
@@ -335,8 +336,11 @@ static int qlcnic_sriov_pf_cfg_vlan_filtering(struct qlcnic_adapter *adapter,
return err;
cmd.req.arg[1] = 0x4;
- if (enable)
+ if (enable) {
cmd.req.arg[1] |= BIT_16;
+ if (qlcnic_84xx_check(adapter))
+ cmd.req.arg[1] |= QLC_SRIOV_ALLOW_VLAN0;
+ }
err = qlcnic_issue_cmd(adapter, &cmd);
if (err)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 3d64113..448d156 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -350,33 +350,15 @@ static ssize_t qlcnic_sysfs_write_mem(struct file *filp, struct kobject *kobj,
return size;
}
-static u32 qlcnic_get_pci_func_count(struct qlcnic_adapter *adapter)
-{
- struct qlcnic_hardware_context *ahw = adapter->ahw;
- u32 count = 0;
-
- if (!(adapter->flags & QLCNIC_ESWITCH_ENABLED))
- return ahw->total_nic_func;
-
- if (ahw->total_pci_func <= QLC_DEFAULT_VNIC_COUNT)
- count = QLC_DEFAULT_VNIC_COUNT;
- else
- count = ahw->max_vnic_func;
-
- return count;
-}
-
int qlcnic_is_valid_nic_func(struct qlcnic_adapter *adapter, u8 pci_func)
{
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int i;
- for (i = 0; i < pci_func_count; i++) {
+ for (i = 0; i < adapter->ahw->max_vnic_func; i++) {
if (adapter->npars[i].pci_func == pci_func)
return i;
}
-
- return -1;
+ return -EINVAL;
}
static int validate_pm_config(struct qlcnic_adapter *adapter,
@@ -464,23 +446,21 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_pm_func_cfg *pm_cfg;
- int i, pm_cfg_size;
u8 pci_func;
+ u32 count;
+ int i;
- pm_cfg_size = pci_func_count * sizeof(*pm_cfg);
- if (size != pm_cfg_size)
- return QL_STATUS_INVALID_PARAM;
-
- memset(buf, 0, pm_cfg_size);
+ memset(buf, 0, size);
pm_cfg = (struct qlcnic_pm_func_cfg *)buf;
-
- for (i = 0; i < pci_func_count; i++) {
+ count = size / sizeof(struct qlcnic_pm_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
pci_func = adapter->npars[i].pci_func;
- if (!adapter->npars[i].active)
+ if (pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
continue;
-
+ }
if (!adapter->npars[i].eswitch_status)
continue;
@@ -494,7 +474,6 @@ static ssize_t qlcnic_sysfs_read_pm_config(struct file *filp,
static int validate_esw_config(struct qlcnic_adapter *adapter,
struct qlcnic_esw_func_cfg *esw_cfg, int count)
{
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_hardware_context *ahw = adapter->ahw;
int i, ret;
u32 op_mode;
@@ -507,7 +486,7 @@ static int validate_esw_config(struct qlcnic_adapter *adapter,
for (i = 0; i < count; i++) {
pci_func = esw_cfg[i].pci_func;
- if (pci_func >= pci_func_count)
+ if (pci_func >= ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
if (adapter->ahw->op_mode == QLCNIC_MGMT_FUNC)
@@ -642,23 +621,21 @@ static ssize_t qlcnic_sysfs_read_esw_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_esw_func_cfg *esw_cfg;
- size_t esw_cfg_size;
- u8 i, pci_func;
-
- esw_cfg_size = pci_func_count * sizeof(*esw_cfg);
- if (size != esw_cfg_size)
- return QL_STATUS_INVALID_PARAM;
+ u8 pci_func;
+ u32 count;
+ int i;
- memset(buf, 0, esw_cfg_size);
+ memset(buf, 0, size);
esw_cfg = (struct qlcnic_esw_func_cfg *)buf;
-
- for (i = 0; i < pci_func_count; i++) {
+ count = size / sizeof(struct qlcnic_esw_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
pci_func = adapter->npars[i].pci_func;
- if (!adapter->npars[i].active)
+ if (pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
continue;
-
+ }
if (!adapter->npars[i].eswitch_status)
continue;
@@ -741,23 +718,24 @@ static ssize_t qlcnic_sysfs_read_npar_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_npar_func_cfg *np_cfg;
struct qlcnic_info nic_info;
- size_t np_cfg_size;
int i, ret;
-
- np_cfg_size = pci_func_count * sizeof(*np_cfg);
- if (size != np_cfg_size)
- return QL_STATUS_INVALID_PARAM;
+ u32 count;
memset(&nic_info, 0, sizeof(struct qlcnic_info));
- memset(buf, 0, np_cfg_size);
+ memset(buf, 0, size);
np_cfg = (struct qlcnic_npar_func_cfg *)buf;
- for (i = 0; i < pci_func_count; i++) {
+ count = size / sizeof(struct qlcnic_npar_func_cfg);
+ for (i = 0; i < adapter->ahw->total_nic_func; i++) {
if (qlcnic_is_valid_nic_func(adapter, i) < 0)
continue;
+ if (adapter->npars[i].pci_func >= count) {
+ dev_dbg(dev, "%s: Total nic functions[%d], App sent function count[%d]\n",
+ __func__, adapter->ahw->total_nic_func, count);
+ continue;
+ }
ret = qlcnic_get_nic_info(adapter, &nic_info, i);
if (ret)
return ret;
@@ -783,7 +761,6 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_esw_statistics port_stats;
int ret;
@@ -793,7 +770,7 @@ static ssize_t qlcnic_sysfs_get_port_stats(struct file *file,
if (size != sizeof(struct qlcnic_esw_statistics))
return QL_STATUS_INVALID_PARAM;
- if (offset >= pci_func_count)
+ if (offset >= adapter->ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
memset(&port_stats, 0, size);
@@ -884,13 +861,12 @@ static ssize_t qlcnic_sysfs_clear_port_stats(struct file *file,
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
int ret;
if (qlcnic_83xx_check(adapter))
return QLC_STATUS_UNSUPPORTED_CMD;
- if (offset >= pci_func_count)
+ if (offset >= adapter->ahw->max_vnic_func)
return QL_STATUS_INVALID_PARAM;
ret = qlcnic_clear_esw_stats(adapter, QLCNIC_STATS_PORT, offset,
@@ -914,17 +890,12 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
{
struct device *dev = container_of(kobj, struct device, kobj);
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
- u32 pci_func_count = qlcnic_get_pci_func_count(adapter);
struct qlcnic_pci_func_cfg *pci_cfg;
struct qlcnic_pci_info *pci_info;
- size_t pci_cfg_sz;
int i, ret;
+ u32 count;
- pci_cfg_sz = pci_func_count * sizeof(*pci_cfg);
- if (size != pci_cfg_sz)
- return QL_STATUS_INVALID_PARAM;
-
- pci_info = kcalloc(pci_func_count, sizeof(*pci_info), GFP_KERNEL);
+ pci_info = kcalloc(size, sizeof(*pci_info), GFP_KERNEL);
if (!pci_info)
return -ENOMEM;
@@ -935,7 +906,8 @@ static ssize_t qlcnic_sysfs_read_pci_config(struct file *file,
}
pci_cfg = (struct qlcnic_pci_func_cfg *)buf;
- for (i = 0; i < pci_func_count; i++) {
+ count = size / sizeof(struct qlcnic_pci_func_cfg);
+ for (i = 0; i < count; i++) {
pci_cfg[i].pci_func = pci_info[i].id;
pci_cfg[i].func_type = pci_info[i].type;
pci_cfg[i].func_state = 0;
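
The sysfs hunks above drop the strict size == count * sizeof(cfg) check and instead derive the entry count from the buffer size the caller supplied, skipping functions that fall outside it. A sketch of that bounds pattern (fill_cfg_entries() and struct func_cfg are hypothetical):

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

struct func_cfg { u8 pci_func; u8 port_num; };	/* hypothetical layout */

/* Fill as many entries as the caller's buffer can hold; functions whose
 * number does not fit are skipped rather than rejected outright. */
static void fill_cfg_entries(char *buf, size_t size,
			     const u8 *pci_funcs, int total_funcs)
{
	struct func_cfg *cfg = (struct func_cfg *)buf;
	u32 count = size / sizeof(struct func_cfg);
	int i;

	memset(buf, 0, size);
	for (i = 0; i < total_funcs; i++) {
		u8 pci_func = pci_funcs[i];

		if (pci_func >= count)
			continue;	/* caller's buffer cannot hold this one */
		cfg[pci_func].pci_func = pci_func;
	}
}
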
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index ce2cfdd..adf87d2 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -3331,24 +3331,16 @@ static void ql_enable_msix(struct ql_adapter *qdev)
for (i = 0; i < qdev->intr_count; i++)
qdev->msi_x_entry[i].entry = i;
- /* Loop to get our vectors. We start with
- * what we want and settle for what we get.
- */
- do {
- err = pci_enable_msix(qdev->pdev,
- qdev->msi_x_entry, qdev->intr_count);
- if (err > 0)
- qdev->intr_count = err;
- } while (err > 0);
-
+ err = pci_enable_msix_range(qdev->pdev, qdev->msi_x_entry,
+ 1, qdev->intr_count);
if (err < 0) {
kfree(qdev->msi_x_entry);
qdev->msi_x_entry = NULL;
netif_warn(qdev, ifup, qdev->ndev,
"MSI-X Enable failed, trying MSI.\n");
- qdev->intr_count = 1;
qlge_irq_type = MSI_IRQ;
- } else if (err == 0) {
+ } else {
+ qdev->intr_count = err;
set_bit(QL_MSIX_ENABLED, &qdev->flags);
netif_info(qdev, ifup, qdev->ndev,
"MSI-X Enabled, got %d vectors.\n",
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 040cb94..b1e6554 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1,8 +1,8 @@
/* SuperH Ethernet device driver
*
* Copyright (C) 2006-2012 Nobuhiro Iwamatsu
- * Copyright (C) 2008-2013 Renesas Solutions Corp.
- * Copyright (C) 2013 Cogent Embedded, Inc.
+ * Copyright (C) 2008-2014 Renesas Solutions Corp.
+ * Copyright (C) 2013-2014 Cogent Embedded, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -27,6 +27,10 @@
#include <linux/platform_device.h>
#include <linux/mdio-bitbang.h>
#include <linux/netdevice.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
#include <linux/phy.h>
#include <linux/cache.h>
#include <linux/io.h>
@@ -2098,8 +2102,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb->len + 2);
txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (skb->len < ETHERSMALL)
- txdesc->buffer_length = ETHERSMALL;
+ if (skb->len < ETH_ZLEN)
+ txdesc->buffer_length = ETH_ZLEN;
else
txdesc->buffer_length = skb->len;
@@ -2710,6 +2714,54 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_change_mtu = eth_change_mtu,
};
+#ifdef CONFIG_OF
+static struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct sh_eth_plat_data *pdata;
+ struct device_node *phy;
+ const char *mac_addr;
+
+ pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ pdata->phy_interface = of_get_phy_mode(np);
+
+ phy = of_parse_phandle(np, "phy-handle", 0);
+ if (of_property_read_u32(phy, "reg", &pdata->phy))
+ return NULL;
+ pdata->phy_irq = irq_of_parse_and_map(phy, 0);
+
+ mac_addr = of_get_mac_address(np);
+ if (mac_addr)
+ memcpy(pdata->mac_addr, mac_addr, ETH_ALEN);
+
+ pdata->no_ether_link =
+ of_property_read_bool(np, "renesas,no-ether-link");
+ pdata->ether_link_active_low =
+ of_property_read_bool(np, "renesas,ether-link-active-low");
+
+ return pdata;
+}
+
+static const struct of_device_id sh_eth_match_table[] = {
+ { .compatible = "renesas,gether-r8a7740", .data = &r8a7740_data },
+ { .compatible = "renesas,ether-r8a7778", .data = &r8a777x_data },
+ { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
+ { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
+ { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
+ { }
+};
+MODULE_DEVICE_TABLE(of, sh_eth_match_table);
+#else
+static inline struct sh_eth_plat_data *sh_eth_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
static int sh_eth_drv_probe(struct platform_device *pdev)
{
int ret, devno = 0;
@@ -2763,6 +2815,8 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
pm_runtime_resume(&pdev->dev);
+ if (pdev->dev.of_node)
+ pd = sh_eth_parse_dt(&pdev->dev);
if (!pd) {
dev_err(&pdev->dev, "no platform data\n");
ret = -EINVAL;
@@ -2778,7 +2832,15 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
mdp->ether_link_active_low = pd->ether_link_active_low;
/* set cpu data */
- mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+ if (id) {
+ mdp->cd = (struct sh_eth_cpu_data *)id->driver_data;
+ } else {
+ const struct of_device_id *match;
+
+ match = of_match_device(of_match_ptr(sh_eth_match_table),
+ &pdev->dev);
+ mdp->cd = (struct sh_eth_cpu_data *)match->data;
+ }
mdp->reg_offset = sh_eth_get_register_offset(mdp->cd->register_type);
sh_eth_set_default_cpu_data(mdp->cd);
@@ -2920,6 +2982,7 @@ static struct platform_driver sh_eth_driver = {
.driver = {
.name = CARDNAME,
.pm = SH_ETH_PM_OPS,
+ .of_match_table = of_match_ptr(sh_eth_match_table),
},
};
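
When sh_eth is probed from the device tree there is no platform_device_id, so the probe above falls back to of_match_device() and takes the per-SoC data from the matched table entry. A minimal sketch of that fallback, assuming a driver-local match table like sh_eth_match_table and a hypothetical struct cpu_data:

#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct cpu_data { int register_type; };	/* hypothetical per-SoC data */

static const struct of_device_id example_match_table[] = {
	{ .compatible = "vendor,example-ether" },	/* hypothetical entry */
	{ }
};

static const struct cpu_data *pick_cpu_data(struct platform_device *pdev,
					    const struct platform_device_id *id)
{
	const struct of_device_id *match;

	if (id)		/* legacy platform-device probe path */
		return (const struct cpu_data *)id->driver_data;

	match = of_match_device(of_match_ptr(example_match_table), &pdev->dev);
	if (!match)
		return NULL;

	return match->data;
}
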
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 6075915..d55e37c 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -27,8 +27,7 @@
#define RX_RING_MIN 64
#define TX_RING_MAX 1024
#define RX_RING_MAX 1024
-#define ETHERSMALL 60
-#define PKT_BUF_SZ 1538
+#define PKT_BUF_SZ 1538
#define SH_ETH_TSU_TIMEOUT_MS 500
#define SH_ETH_TSU_CAM_ENTRIES 32
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 174a92f..3b39798 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -172,8 +172,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data;
int i, rc;
- /* We can have one VI for each 8K region. However we need
- * multiple TX queues per channel.
+ /* We can have one VI for each 8K region. However, until we
+ * use TX option descriptors we need two TX queues per channel.
*/
efx->max_channels =
min_t(unsigned int,
diff --git a/drivers/net/ethernet/sfc/ef10_regs.h b/drivers/net/ethernet/sfc/ef10_regs.h
index 207ac9a..62a55dd 100644
--- a/drivers/net/ethernet/sfc/ef10_regs.h
+++ b/drivers/net/ethernet/sfc/ef10_regs.h
@@ -227,36 +227,6 @@
#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
-/* RX_USER_DESC */
-#define ESF_DZ_RX_USR_RESERVED_LBN 62
-#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
-#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
-#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
-#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
-#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
-#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
-#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
-#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
-#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
-#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
-#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
-#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
-#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
-#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
-#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
-#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
-#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
-#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
-#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
-#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
-
/* TX_CSUM_TSTAMP_DESC */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
@@ -338,37 +308,6 @@
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_USER_DESC */
-#define ESF_DZ_TX_USR_TYPE_LBN 63
-#define ESF_DZ_TX_USR_TYPE_WIDTH 1
-#define ESF_DZ_TX_USR_CONT_LBN 62
-#define ESF_DZ_TX_USR_CONT_WIDTH 1
-#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
-#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
-#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
-#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
-#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
-#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
-#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
-#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
-#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
-#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
-#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
-#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
-#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
-#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
-#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
-#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
-#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
-#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
-#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
/*************************************************************************/
/* TX_DESC_UPD_REG: Transmit descriptor update register.
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 83d4643..d72e003 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -503,8 +503,6 @@ static int efx_probe_channel(struct efx_channel *channel)
goto fail;
}
- channel->n_rx_frm_trunc = 0;
-
return 0;
fail:
@@ -1346,20 +1344,23 @@ static int efx_probe_interrupts(struct efx_nic *efx)
for (i = 0; i < n_channels; i++)
xentries[i].entry = i;
- rc = pci_enable_msix(efx->pci_dev, xentries, n_channels);
- if (rc > 0) {
+ rc = pci_enable_msix_range(efx->pci_dev,
+ xentries, 1, n_channels);
+ if (rc < 0) {
+ /* Fall back to single channel MSI */
+ efx->interrupt_mode = EFX_INT_MODE_MSI;
+ netif_err(efx, drv, efx->net_dev,
+ "could not enable MSI-X\n");
+ } else if (rc < n_channels) {
netif_err(efx, drv, efx->net_dev,
"WARNING: Insufficient MSI-X vectors"
" available (%d < %u).\n", rc, n_channels);
netif_err(efx, drv, efx->net_dev,
"WARNING: Performance may be reduced.\n");
- EFX_BUG_ON_PARANOID(rc >= n_channels);
n_channels = rc;
- rc = pci_enable_msix(efx->pci_dev, xentries,
- n_channels);
}
- if (rc == 0) {
+ if (rc > 0) {
efx->n_channels = n_channels;
if (n_channels > extra_channels)
n_channels -= extra_channels;
@@ -1375,11 +1376,6 @@ static int efx_probe_interrupts(struct efx_nic *efx)
for (i = 0; i < efx->n_channels; i++)
efx_get_channel(efx, i)->irq =
xentries[i].vector;
- } else {
- /* Fall back to single channel MSI */
- efx->interrupt_mode = EFX_INT_MODE_MSI;
- netif_err(efx, drv, efx->net_dev,
- "could not enable MSI-X\n");
}
}
@@ -2115,7 +2111,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
- char *new_addr = addr->sa_data;
+ u8 *new_addr = addr->sa_data;
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
@@ -3273,6 +3269,6 @@ module_exit(efx_exit_module);
MODULE_AUTHOR("Solarflare Communications and "
"Michael Brown <mbrown@fensystems.co.uk>");
-MODULE_DESCRIPTION("Solarflare Communications network driver");
+MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
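
The efx_probe_interrupts() hunk above (and the niu change further down) replaces the old probe-then-retry dance around pci_enable_msix() with pci_enable_msix_range(), which allocates anywhere between a minimum and a maximum vector count in one call. A minimal sketch of the new API, with an arbitrary vector count and abbreviated error handling:

#include <linux/pci.h>

static int my_setup_msix(struct pci_dev *pdev)
{
	struct msix_entry entries[4];
	int i, nvec;

	for (i = 0; i < 4; i++)
		entries[i].entry = i;

	/* Returns the number of vectors allocated (at least 1) or a
	 * negative errno; no retry loop is needed.
	 */
	nvec = pci_enable_msix_range(pdev, entries, 1, 4);
	if (nvec < 0)
		return nvec;	/* fall back to MSI or a legacy IRQ */

	/* nvec may be smaller than requested; size queues accordingly */
	return nvec;
}
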
diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h
index dbd7b78..9903258 100644
--- a/drivers/net/ethernet/sfc/efx.h
+++ b/drivers/net/ethernet/sfc/efx.h
@@ -14,7 +14,7 @@
#include "net_driver.h"
#include "filter.h"
-/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
#define EFX_MEM_BAR 2
/* TX */
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 2294289..89fcaff 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -251,6 +251,9 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
* @test_index: Starting index of the test
* @strings: Ethtool strings, or %NULL
* @data: Ethtool test results, or %NULL
+ *
+ * Fill in a block of loopback self-test entries. Return new test
+ * index.
*/
static int efx_fill_loopback_test(struct efx_nic *efx,
struct efx_loopback_self_tests *lb_tests,
@@ -290,6 +293,12 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
* @tests: Efx self-test results structure, or %NULL
* @strings: Ethtool strings, or %NULL
* @data: Ethtool test results, or %NULL
+ *
+ * Get self-test number of strings, strings, and/or test results.
+ * Return number of strings (== number of test results).
+ *
+ * The reason for merging these three functions is to make sure that
+ * they can never be inconsistent.
*/
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
struct efx_self_tests *tests,
@@ -444,7 +453,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_self_tests *efx_tests;
- int already_up;
+ bool already_up;
int rc = -ENOMEM;
efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
@@ -452,8 +461,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
goto fail;
if (efx->state != STATE_READY) {
- rc = -EIO;
- goto fail1;
+ rc = -EBUSY;
+ goto out;
}
netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
@@ -466,7 +475,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
- goto fail1;
+ goto out;
}
}
@@ -479,8 +488,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
rc == 0 ? "passed" : "failed",
(test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
-fail1:
- /* Fill ethtool results structures */
+out:
efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
kfree(efx_tests);
fail:
@@ -691,7 +699,6 @@ static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
}
-
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
diff --git a/drivers/net/ethernet/sfc/falcon.c b/drivers/net/ethernet/sfc/falcon.c
index 18d6f76..72652f3 100644
--- a/drivers/net/ethernet/sfc/falcon.c
+++ b/drivers/net/ethernet/sfc/falcon.c
@@ -422,7 +422,6 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
-
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
@@ -467,6 +466,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
efx_schedule_channel_irq(efx_get_channel(efx, 1));
return IRQ_HANDLED;
}
+
/**************************************************************************
*
* RSS
@@ -1358,6 +1358,7 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
case 100: link_speed = 1; break;
default: link_speed = 0; break;
}
+
/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
* as advertised. Disable to ensure packets are not
* indefinitely held and TX queue can be flushed at any point
@@ -2868,4 +2869,3 @@ const struct efx_nic_type falcon_b0_nic_type = {
.mcdi_max_ver = -1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
-
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index f72489a..aa1b169 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -311,7 +311,6 @@ static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
*/
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
-
struct efx_tx_buffer *buffer;
efx_qword_t *txd;
unsigned write_ptr;
@@ -1609,7 +1608,6 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-
/* Setup RSS indirection table.
* This maps from the hash value of the packet to RXQ
*/
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index af2b8c5..8a400a0 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -1323,7 +1323,6 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
return &rx_queue->buffer[index];
}
-
/**
* EFX_MAX_FRAME_LEN - calculate maximum frame length
*
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 79226b1..32d969e 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -530,4 +530,3 @@ void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}
-
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index d7a3682..722344f 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -223,7 +223,6 @@ struct efx_ptp_timeset {
* @evt_list: List of MC receive events awaiting packets
* @evt_free_list: List of free events
* @evt_lock: Lock for manipulating evt_list and evt_free_list
- * @evt_overflow: Boolean indicating that event list has overflowed
* @rx_evts: Instantiated events (on evt_list and evt_free_list)
* @workwq: Work queue for processing pending PTP operations
* @work: Work task
@@ -275,7 +274,6 @@ struct efx_ptp_data {
struct list_head evt_list;
struct list_head evt_free_list;
spinlock_t evt_lock;
- bool evt_overflow;
struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
struct workqueue_struct *workwq;
struct work_struct work;
@@ -768,37 +766,36 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
return -EAGAIN;
}
- /* Convert the NIC time into kernel time. No correction is required-
- * this time is the output of a firmware process.
- */
- mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
- ptp->timeset[last_good].minor, 0);
-
- /* Calculate delay from actual PPS to last_time */
- delta = ktime_to_timespec(mc_time);
- delta.tv_nsec +=
- last_time->ts_real.tv_nsec -
- (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
-
- /* It is possible that the seconds rolled over between taking
+ /* Calculate delay from last good sync (host time) to last_time.
+ * It is possible that the seconds rolled over between taking
* the start reading and the last value written by the host. The
* timescales are such that a gap of more than one second is never
- * expected.
+ * expected. delta is *not* normalised.
*/
start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
- if (start_sec != last_sec) {
- if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
- netif_warn(efx, hw, efx->net_dev,
- "PTP bad synchronisation seconds\n");
- return -EAGAIN;
- } else {
- delta.tv_sec = 1;
- }
- } else {
- delta.tv_sec = 0;
+ if (start_sec != last_sec &&
+ ((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
+ netif_warn(efx, hw, efx->net_dev,
+ "PTP bad synchronisation seconds\n");
+ return -EAGAIN;
}
+ delta.tv_sec = (last_sec - start_sec) & 1;
+ delta.tv_nsec =
+ last_time->ts_real.tv_nsec -
+ (ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
+
+ /* Convert the NIC time at last good sync into kernel time.
+ * No correction is required - this time is the output of a
+ * firmware process.
+ */
+ mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+ ptp->timeset[last_good].minor, 0);
+
+ /* Calculate delay from NIC top of second to last_time */
+ delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec;
+ /* Set PPS timestamp to match NIC top of second */
ptp->host_time_pps = *last_time;
pps_sub_ts(&ptp->host_time_pps, delta);
@@ -941,11 +938,6 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
}
}
}
- /* If the event overflow flag is set and the event list is now empty
- * clear the flag to re-enable the overflow warning message.
- */
- if (ptp->evt_overflow && list_empty(&ptp->evt_list))
- ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
}
@@ -989,11 +981,6 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
break;
}
}
- /* If the event overflow flag is set and the event list is now empty
- * clear the flag to re-enable the overflow warning message.
- */
- if (ptp->evt_overflow && list_empty(&ptp->evt_list))
- ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
return rc;
@@ -1147,7 +1134,6 @@ static int efx_ptp_stop(struct efx_nic *efx)
list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
list_move(cursor, &efx->ptp_data->evt_free_list);
}
- ptp->evt_overflow = false;
spin_unlock_bh(&efx->ptp_data->evt_lock);
return rc;
@@ -1253,7 +1239,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
spin_lock_init(&ptp->evt_lock);
for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
- ptp->evt_overflow = false;
/* Get the NIC PTP attributes and set up time conversions */
rc = efx_ptp_get_attributes(efx);
@@ -1380,6 +1365,7 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
u8 *match_data_012, *match_data_345;
unsigned int version;
+ u8 *data;
match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
@@ -1388,7 +1374,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
return false;
}
- version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
+ data = skb->data;
+ version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]);
if (version != PTP_VERSION_V1) {
return false;
}
@@ -1396,13 +1383,14 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
/* PTP V1 uses all six bytes of the UUID to match the packet
* to the timestamp
*/
- match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
- match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
+ match_data_012 = data + PTP_V1_UUID_OFFSET;
+ match_data_345 = data + PTP_V1_UUID_OFFSET + 3;
} else {
if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
return false;
}
- version = skb->data[PTP_V2_VERSION_OFFSET];
+ data = skb->data;
+ version = data[PTP_V2_VERSION_OFFSET];
if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
return false;
}
@@ -1414,17 +1402,17 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
* enhanced mode fixes this issue and uses bytes 0-2
* and byte 5-7 of the UUID.
*/
- match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
+ match_data_345 = data + PTP_V2_UUID_OFFSET + 5;
if (ptp->mode == MC_CMD_PTP_MODE_V2) {
- match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
+ match_data_012 = data + PTP_V2_UUID_OFFSET + 2;
} else {
- match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
+ match_data_012 = data + PTP_V2_UUID_OFFSET + 0;
BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
}
}
/* Does this packet require timestamping? */
- if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
+ if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
match->state = PTP_PACKET_STATE_UNMATCHED;
/* We expect the sequence number to be in the same position in
@@ -1440,8 +1428,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
(match_data_345[0] << 24));
match->words[1] = (match_data_345[1] |
(match_data_345[2] << 8) |
- (skb->data[PTP_V1_SEQUENCE_OFFSET +
- PTP_V1_SEQUENCE_LENGTH - 1] <<
+ (data[PTP_V1_SEQUENCE_OFFSET +
+ PTP_V1_SEQUENCE_LENGTH - 1] <<
16));
} else {
match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
@@ -1635,13 +1623,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
list_add_tail(&evt->link, &ptp->evt_list);
queue_work(ptp->workwq, &ptp->work);
- } else if (!ptp->evt_overflow) {
- /* Log a warning message and set the event overflow flag.
- * The message won't be logged again until the event queue
- * becomes empty.
- */
+ } else if (net_ratelimit()) {
+ /* Log a rate-limited warning message. */
netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
- ptp->evt_overflow = true;
}
spin_unlock_bh(&ptp->evt_lock);
}
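
The reworked efx_ptp_process_times() above now builds the PPS offset as a possibly unnormalised timespec and lets the later pps_sub_ts() call absorb it. A rough illustration of that arithmetic with made-up numbers, assuming (as the hunk's range check guarantees) that the host and NIC seconds differ by at most one:

#include <linux/time.h>

static struct timespec example_pps_delta(void)
{
	struct timespec delta;
	long start_sec = 41;		/* host_start >> MC_NANOSECOND_BITS */
	long last_sec = 42;		/* ts_real.tv_sec & MC_SECOND_MASK */
	long host_start_ns = 900000000;	/* host_start & MC_NANOSECOND_MASK */
	long last_real_ns = 100000000;	/* ts_real.tv_nsec */
	long mc_ns = 5000;		/* NIC time converted to kernel time */

	delta.tv_sec = (last_sec - start_sec) & 1;		/* 1 */
	delta.tv_nsec = last_real_ns - host_start_ns + mc_ns;	/* may be negative */

	return delta;	/* normalised only when it is finally subtracted */
}
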
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 75d11fa..fa94753 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -787,15 +787,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
* Requires TX checksum offload support.
*/
-/* Number of bytes inserted at the start of a TSO header buffer,
- * similar to NET_IP_ALIGN.
- */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define TSOH_OFFSET 0
-#else
-#define TSOH_OFFSET NET_IP_ALIGN
-#endif
-
#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
/**
@@ -882,13 +873,13 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(buffer->flags);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
- if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
+ if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
unsigned index =
(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
struct efx_buffer *page_buf =
&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
unsigned offset =
- TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+ TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
if (unlikely(!page_buf->addr) &&
efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
@@ -901,10 +892,10 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
} else {
tx_queue->tso_long_headers++;
- buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+ buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
if (unlikely(!buffer->heap_buf))
return NULL;
- result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+ result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
}
@@ -1011,7 +1002,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
static int tso_start(struct tso_state *st, struct efx_nic *efx,
const struct sk_buff *skb)
{
- bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+ bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
struct device *dma_dev = &efx->pci_dev->dev;
unsigned int header_len, in_len;
dma_addr_t dma_addr;
@@ -1037,7 +1028,7 @@ static int tso_start(struct tso_state *st, struct efx_nic *efx,
st->out_len = skb->len - header_len;
- if (!use_options) {
+ if (!use_opt_desc) {
st->header_unmap_len = 0;
if (likely(in_len == 0)) {
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 8e2266e..79606f47a 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9041,7 +9041,7 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
struct msix_entry msi_vec[NIU_NUM_LDG];
struct niu_parent *parent = np->parent;
struct pci_dev *pdev = np->pdev;
- int i, num_irqs, err;
+ int i, num_irqs;
u8 first_ldg;
first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
@@ -9053,21 +9053,16 @@ static void niu_try_msix(struct niu *np, u8 *ldg_num_map)
(np->port == 0 ? 3 : 1));
BUG_ON(num_irqs > (NIU_NUM_LDG / parent->num_ports));
-retry:
for (i = 0; i < num_irqs; i++) {
msi_vec[i].vector = 0;
msi_vec[i].entry = i;
}
- err = pci_enable_msix(pdev, msi_vec, num_irqs);
- if (err < 0) {
+ num_irqs = pci_enable_msix_range(pdev, msi_vec, 1, num_irqs);
+ if (num_irqs < 0) {
np->flags &= ~NIU_FLAGS_MSIX;
return;
}
- if (err > 0) {
- num_irqs = err;
- goto retry;
- }
np->flags |= NIU_FLAGS_MSIX;
for (i = 0; i < num_irqs; i++)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ffd4d12..53f85dd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1477,7 +1477,6 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
struct cpsw_priv *priv = netdev_priv(dev);
- struct mii_ioctl_data *data = if_mii(req);
int slave_no = cpsw_slave_index(priv);
if (!netif_running(dev))
@@ -1490,14 +1489,11 @@ static int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
case SIOCGHWTSTAMP:
return cpsw_hwtstamp_get(dev, req);
#endif
- case SIOCGMIIPHY:
- data->phy_id = priv->slaves[slave_no].phy->addr;
- break;
- default:
- return -ENOTSUPP;
}
- return 0;
+ if (!priv->slaves[slave_no].phy)
+ return -EOPNOTSUPP;
+ return phy_mii_ioctl(priv->slaves[slave_no].phy, req, cmd);
}
static void cpsw_ndo_tx_timeout(struct net_device *ndev)
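
The cpsw hunk above drops the hand-rolled SIOCGMIIPHY handling and forwards any remaining request to the PHY library. A minimal sketch of that delegation pattern; my_get_phydev() is a hypothetical helper standing in for however the driver finds its attached PHY:

#include <linux/netdevice.h>
#include <linux/phy.h>

static struct phy_device *my_get_phydev(struct net_device *dev);	/* hypothetical */

static int my_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct phy_device *phydev = my_get_phydev(dev);

	if (!netif_running(dev))
		return -EINVAL;
	if (!phydev)
		return -EOPNOTSUPP;

	/* phy_mii_ioctl() covers the standard SIOCGMIIPHY, SIOCGMIIREG and
	 * SIOCSMIIREG requests (and SIOCSHWTSTAMP for PHYs that implement it).
	 */
	return phy_mii_ioctl(phydev, req, cmd);
}
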
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index 0df36c6..104d46f 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -641,11 +641,10 @@ static int w5100_hw_probe(struct platform_device *pdev)
if (!mem)
return -ENXIO;
mem_size = resource_size(mem);
- if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
- return -EBUSY;
- priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
- if (!priv->base)
- return -EBUSY;
+
+ priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
spin_lock_init(&priv->reg_lock);
priv->indirect = mem_size < W5100_BUS_DIRECT_SIZE;
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 71c27b3..1f33c4c 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -561,11 +561,10 @@ static int w5300_hw_probe(struct platform_device *pdev)
if (!mem)
return -ENXIO;
mem_size = resource_size(mem);
- if (!devm_request_mem_region(&pdev->dev, mem->start, mem_size, name))
- return -EBUSY;
- priv->base = devm_ioremap(&pdev->dev, mem->start, mem_size);
- if (!priv->base)
- return -EBUSY;
+
+ priv->base = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(priv->base))
+ return PTR_ERR(priv->base);
spin_lock_init(&priv->reg_lock);
priv->indirect = mem_size < W5300_BUS_DIRECT_SIZE;
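
The w5100 and w5300 hunks above collapse devm_request_mem_region() plus devm_ioremap() into a single devm_ioremap_resource() call, which requests the region, maps it, and returns an ERR_PTR value on failure. A minimal sketch of that pattern:

#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/err.h>

static int my_map_regs(struct platform_device *pdev, void __iomem **base)
{
	struct resource *mem;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	/* devm_ioremap_resource() validates mem, requests the region and
	 * maps it; the mapping is released automatically on driver detach.
	 */
	*base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(*base))
		return PTR_ERR(*base);

	return 0;
}
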
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 7b594ce..39fc230 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -139,6 +139,8 @@ int rndis_filter_set_device_mac(struct hv_device *hdev, char *mac);
#define NVSP_PROTOCOL_VERSION_1 2
#define NVSP_PROTOCOL_VERSION_2 0x30002
+#define NVSP_PROTOCOL_VERSION_4 0x40000
+#define NVSP_PROTOCOL_VERSION_5 0x50000
enum {
NVSP_MSG_TYPE_NONE = 0,
@@ -193,6 +195,23 @@ enum {
NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE,
NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+ NVSP_MSG2_MAX = NVSP_MSG2_TYPE_ALLOC_CHIMNEY_HANDLE_COMP,
+
+ /* Version 4 messages */
+ NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION,
+ NVSP_MSG4_TYPE_SWITCH_DATA_PATH,
+ NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+ NVSP_MSG4_MAX = NVSP_MSG4_TYPE_UPLINK_CONNECT_STATE_DEPRECATED,
+
+ /* Version 5 messages */
+ NVSP_MSG5_TYPE_OID_QUERY_EX,
+ NVSP_MSG5_TYPE_OID_QUERY_EX_COMP,
+ NVSP_MSG5_TYPE_SUBCHANNEL,
+ NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
+
+ NVSP_MSG5_MAX = NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE,
};
enum {
@@ -447,10 +466,44 @@ union nvsp_2_message_uber {
struct nvsp_2_free_rxbuf free_rxbuf;
} __packed;
+enum nvsp_subchannel_operation {
+ NVSP_SUBCHANNEL_NONE = 0,
+ NVSP_SUBCHANNEL_ALLOCATE,
+ NVSP_SUBCHANNEL_MAX
+};
+
+struct nvsp_5_subchannel_request {
+ u32 op;
+ u32 num_subchannels;
+} __packed;
+
+struct nvsp_5_subchannel_complete {
+ u32 status;
+ u32 num_subchannels; /* Actual number of subchannels allocated */
+} __packed;
+
+struct nvsp_5_send_indirect_table {
+ /* The number of entries in the send indirection table */
+ u32 count;
+
+ /* The offset of the send indirection table from the top of this struct.
+ * The send indirection table tells which channel to put the send
+ * traffic on. Each entry is a channel number.
+ */
+ u32 offset;
+} __packed;
+
+union nvsp_5_message_uber {
+ struct nvsp_5_subchannel_request subchn_req;
+ struct nvsp_5_subchannel_complete subchn_comp;
+ struct nvsp_5_send_indirect_table send_table;
+} __packed;
+
union nvsp_all_messages {
union nvsp_message_init_uber init_msg;
union nvsp_1_message_uber v1_msg;
union nvsp_2_message_uber v2_msg;
+ union nvsp_5_message_uber v5_msg;
} __packed;
/* ALL Messages */
@@ -506,6 +559,8 @@ struct netvsc_device {
/* Holds rndis device info */
void *extension;
+ /* The receive buffer for this device */
+ unsigned char cb_buffer[NETVSC_PACKET_SIZE];
};
/* NdisInitialize message */
@@ -846,12 +901,6 @@ struct rndis_message {
};
-struct rndis_filter_packet {
- void *completion_ctx;
- void (*completion)(void *context);
- struct rndis_message msg;
-};
-
/* Handy macros */
/* get the size of an RNDIS message. Pass in the message type, */
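
The version-5 send indirection table added above carries a count and a byte offset to an array of channel numbers. An illustrative walk of such a table, laid out as the structure comment describes; the struct here mirrors nvsp_5_send_indirect_table only so the example stands alone, and the entry type (u32 channel numbers) follows the comment rather than any on-the-wire guarantee:

#include <linux/types.h>
#include <linux/printk.h>

struct my_send_indirect_table {
	u32 count;
	u32 offset;	/* bytes from the top of this struct */
} __packed;

static void my_dump_send_table(const struct my_send_indirect_table *tab)
{
	const u32 *chan = (const u32 *)((const u8 *)tab + tab->offset);
	u32 i;

	for (i = 0; i < tab->count; i++)
		pr_debug("send table[%u] -> channel %u\n", i, chan[i]);
}
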
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 03a2c6e..1a0280d 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -290,7 +290,7 @@ static int negotiate_nvsp_ver(struct hv_device *device,
NVSP_STAT_SUCCESS)
return -EINVAL;
- if (nvsp_ver != NVSP_PROTOCOL_VERSION_2)
+ if (nvsp_ver == NVSP_PROTOCOL_VERSION_1)
return 0;
/* NVSPv2 only: Send NDIS config */
@@ -314,6 +314,9 @@ static int netvsc_connect_vsp(struct hv_device *device)
struct nvsp_message *init_packet;
int ndis_version;
struct net_device *ndev;
+ u32 ver_list[] = { NVSP_PROTOCOL_VERSION_1, NVSP_PROTOCOL_VERSION_2,
+ NVSP_PROTOCOL_VERSION_4, NVSP_PROTOCOL_VERSION_5 };
+ int i, num_ver = 4; /* number of different NVSP versions */
net_device = get_outbound_net_device(device);
if (!net_device)
@@ -323,13 +326,14 @@ static int netvsc_connect_vsp(struct hv_device *device)
init_packet = &net_device->channel_init_pkt;
/* Negotiate the latest NVSP protocol supported */
- if (negotiate_nvsp_ver(device, net_device, init_packet,
- NVSP_PROTOCOL_VERSION_2) == 0) {
- net_device->nvsp_version = NVSP_PROTOCOL_VERSION_2;
- } else if (negotiate_nvsp_ver(device, net_device, init_packet,
- NVSP_PROTOCOL_VERSION_1) == 0) {
- net_device->nvsp_version = NVSP_PROTOCOL_VERSION_1;
- } else {
+ for (i = num_ver - 1; i >= 0; i--)
+ if (negotiate_nvsp_ver(device, net_device, init_packet,
+ ver_list[i]) == 0) {
+ net_device->nvsp_version = ver_list[i];
+ break;
+ }
+
+ if (i < 0) {
ret = -EPROTO;
goto cleanup;
}
@@ -339,7 +343,10 @@ static int netvsc_connect_vsp(struct hv_device *device)
/* Send the ndis version */
memset(init_packet, 0, sizeof(struct nvsp_message));
- ndis_version = 0x00050001;
+ if (net_device->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
+ ndis_version = 0x00050001;
+ else
+ ndis_version = 0x0006001e;
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_NDIS_VER;
init_packet->msg.v1_msg.
@@ -432,17 +439,14 @@ static inline u32 hv_ringbuf_avail_percent(
return avail_write * 100 / ring_info->ring_datasize;
}
-static void netvsc_send_completion(struct hv_device *device,
+static void netvsc_send_completion(struct netvsc_device *net_device,
+ struct hv_device *device,
struct vmpacket_descriptor *packet)
{
- struct netvsc_device *net_device;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *nvsc_packet;
struct net_device *ndev;
- net_device = get_inbound_net_device(device);
- if (!net_device)
- return;
ndev = net_device->ndev;
nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
@@ -561,13 +565,13 @@ int netvsc_send(struct hv_device *device,
}
static void netvsc_send_recv_completion(struct hv_device *device,
+ struct netvsc_device *net_device,
u64 transaction_id, u32 status)
{
struct nvsp_message recvcompMessage;
int retries = 0;
int ret;
struct net_device *ndev;
- struct netvsc_device *net_device = hv_get_drvdata(device);
ndev = net_device->ndev;
@@ -653,14 +657,15 @@ static void netvsc_receive_completion(void *context)
/* Send a receive completion for the xfer page packet */
if (fsend_receive_comp)
- netvsc_send_recv_completion(device, transaction_id, status);
+ netvsc_send_recv_completion(device, net_device, transaction_id,
+ status);
}
-static void netvsc_receive(struct hv_device *device,
- struct vmpacket_descriptor *packet)
+static void netvsc_receive(struct netvsc_device *net_device,
+ struct hv_device *device,
+ struct vmpacket_descriptor *packet)
{
- struct netvsc_device *net_device;
struct vmtransfer_page_packet_header *vmxferpage_packet;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet *netvsc_packet = NULL;
@@ -673,9 +678,6 @@ static void netvsc_receive(struct hv_device *device,
LIST_HEAD(listHead);
- net_device = get_inbound_net_device(device);
- if (!net_device)
- return;
ndev = net_device->ndev;
/*
@@ -741,7 +743,7 @@ static void netvsc_receive(struct hv_device *device,
spin_unlock_irqrestore(&net_device->recv_pkt_list_lock,
flags);
- netvsc_send_recv_completion(device,
+ netvsc_send_recv_completion(device, net_device,
vmxferpage_packet->d.trans_id,
NVSP_STAT_FAIL);
@@ -800,22 +802,16 @@ static void netvsc_channel_cb(void *context)
struct netvsc_device *net_device;
u32 bytes_recvd;
u64 request_id;
- unsigned char *packet;
struct vmpacket_descriptor *desc;
unsigned char *buffer;
int bufferlen = NETVSC_PACKET_SIZE;
struct net_device *ndev;
- packet = kzalloc(NETVSC_PACKET_SIZE * sizeof(unsigned char),
- GFP_ATOMIC);
- if (!packet)
- return;
- buffer = packet;
-
net_device = get_inbound_net_device(device);
if (!net_device)
- goto out;
+ return;
ndev = net_device->ndev;
+ buffer = net_device->cb_buffer;
do {
ret = vmbus_recvpacket_raw(device->channel, buffer, bufferlen,
@@ -825,11 +821,13 @@ static void netvsc_channel_cb(void *context)
desc = (struct vmpacket_descriptor *)buffer;
switch (desc->type) {
case VM_PKT_COMP:
- netvsc_send_completion(device, desc);
+ netvsc_send_completion(net_device,
+ device, desc);
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- netvsc_receive(device, desc);
+ netvsc_receive(net_device,
+ device, desc);
break;
default:
@@ -841,23 +839,16 @@ static void netvsc_channel_cb(void *context)
break;
}
- /* reset */
- if (bufferlen > NETVSC_PACKET_SIZE) {
- kfree(buffer);
- buffer = packet;
- bufferlen = NETVSC_PACKET_SIZE;
- }
} else {
- /* reset */
- if (bufferlen > NETVSC_PACKET_SIZE) {
- kfree(buffer);
- buffer = packet;
- bufferlen = NETVSC_PACKET_SIZE;
- }
-
+ /*
+ * We are done for this pass.
+ */
break;
}
+
} else if (ret == -ENOBUFS) {
+ if (bufferlen > NETVSC_PACKET_SIZE)
+ kfree(buffer);
/* Handle large packet */
buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
if (buffer == NULL) {
@@ -872,8 +863,8 @@ static void netvsc_channel_cb(void *context)
}
} while (1);
-out:
- kfree(buffer);
+ if (bufferlen > NETVSC_PACKET_SIZE)
+ kfree(buffer);
return;
}
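
The netvsc_connect_vsp() hunk above walks its version list from newest to oldest and stops at the first one the host accepts. A generic sketch of that negotiate-downwards loop; my_try_version() is a hypothetical stand-in for negotiate_nvsp_ver(), and the version values are illustrative rather than the NVSP_PROTOCOL_VERSION_* constants:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>

static int my_try_version(u32 ver);	/* hypothetical: returns 0 on success */

static int my_negotiate(u32 *chosen)
{
	static const u32 versions[] = { 1, 2, 4, 5 };	/* oldest to newest */
	int i;

	for (i = ARRAY_SIZE(versions) - 1; i >= 0; i--) {
		if (my_try_version(versions[i]) == 0) {
			*chosen = versions[i];
			return 0;
		}
	}

	return -EPROTO;	/* host supports none of our versions */
}
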
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7141a19..9ef6be9 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -155,7 +155,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
/* Allocate a netvsc packet based on # of frags. */
packet = kzalloc(sizeof(struct hv_netvsc_packet) +
(num_pages * sizeof(struct hv_page_buffer)) +
- sizeof(struct rndis_filter_packet) +
+ sizeof(struct rndis_message) +
NDIS_VLAN_PPI_SIZE, GFP_ATOMIC);
if (!packet) {
/* out of memory, drop packet */
@@ -327,7 +327,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (nvdev == NULL || nvdev->destroy)
return -ENODEV;
- if (nvdev->nvsp_version == NVSP_PROTOCOL_VERSION_2)
+ if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
limit = NETVSC_MTU;
if (mtu < 68 || mtu > limit)
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 1084e5d..f0cc8ef 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -58,9 +58,6 @@ struct rndis_request {
u8 request_ext[RNDIS_EXT_LEN];
};
-static void rndis_filter_send_completion(void *ctx);
-
-
static struct rndis_device *get_rndis_device(void)
{
struct rndis_device *device;
@@ -277,7 +274,7 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
"rndis response buffer overflow "
"detected (size %u max %zu)\n",
resp->msg_len,
- sizeof(struct rndis_filter_packet));
+ sizeof(struct rndis_message));
if (resp->ndis_msg_type ==
RNDIS_MSG_RESET_C) {
@@ -898,17 +895,14 @@ int rndis_filter_close(struct hv_device *dev)
int rndis_filter_send(struct hv_device *dev,
struct hv_netvsc_packet *pkt)
{
- int ret;
- struct rndis_filter_packet *filter_pkt;
struct rndis_message *rndis_msg;
struct rndis_packet *rndis_pkt;
u32 rndis_msg_size;
bool isvlan = pkt->vlan_tci & VLAN_TAG_PRESENT;
/* Add the rndis header */
- filter_pkt = (struct rndis_filter_packet *)pkt->extension;
+ rndis_msg = (struct rndis_message *)pkt->extension;
- rndis_msg = &filter_pkt->msg;
rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);
if (isvlan)
rndis_msg_size += NDIS_VLAN_PPI_SIZE;
@@ -961,34 +955,5 @@ int rndis_filter_send(struct hv_device *dev,
pkt->page_buf[1].len = rndis_msg_size - pkt->page_buf[0].len;
}
- /* Save the packet send completion and context */
- filter_pkt->completion = pkt->completion.send.send_completion;
- filter_pkt->completion_ctx =
- pkt->completion.send.send_completion_ctx;
-
- /* Use ours */
- pkt->completion.send.send_completion = rndis_filter_send_completion;
- pkt->completion.send.send_completion_ctx = filter_pkt;
-
- ret = netvsc_send(dev, pkt);
- if (ret != 0) {
- /*
- * Reset the completion to originals to allow retries from
- * above
- */
- pkt->completion.send.send_completion =
- filter_pkt->completion;
- pkt->completion.send.send_completion_ctx =
- filter_pkt->completion_ctx;
- }
-
- return ret;
-}
-
-static void rndis_filter_send_completion(void *ctx)
-{
- struct rndis_filter_packet *filter_pkt = ctx;
-
- /* Pass it back to the original handler */
- filter_pkt->completion(filter_pkt->completion_ctx);
+ return netvsc_send(dev, pkt);
}
diff --git a/drivers/net/ieee802154/Kconfig b/drivers/net/ieee802154/Kconfig
index 08ae465..9aa06ec 100644
--- a/drivers/net/ieee802154/Kconfig
+++ b/drivers/net/ieee802154/Kconfig
@@ -15,9 +15,9 @@ config IEEE802154_FAKEHARD
depends on IEEE802154_DRIVERS
---help---
Say Y here to enable the fake driver that serves as an example
- of HardMAC device driver.
+ of HardMAC device driver.
- This driver can also be built as a module. To do so say M here.
+ This driver can also be built as a module. To do so say M here.
The module will be called 'fakehard'.
config IEEE802154_FAKELB
@@ -31,17 +31,23 @@ config IEEE802154_FAKELB
The module will be called 'fakelb'.
config IEEE802154_AT86RF230
- depends on IEEE802154_DRIVERS && MAC802154
- tristate "AT86RF230/231 transceiver driver"
- depends on SPI
+ depends on IEEE802154_DRIVERS && MAC802154
+ tristate "AT86RF230/231/212 transceiver driver"
+ depends on SPI
+ ---help---
+ Say Y here to enable the at86rf230/231/212 SPI 802.15.4 wireless
+ controller.
+
+ This driver can also be built as a module. To do so, say M here.
+ The module will be called 'at86rf230'.
config IEEE802154_MRF24J40
- tristate "Microchip MRF24J40 transceiver driver"
- depends on IEEE802154_DRIVERS && MAC802154
- depends on SPI
- ---help---
- Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
- controller.
-
- This driver can also be built as a module. To do so, say M here.
- the module will be called 'mrf24j40'.
+ tristate "Microchip MRF24J40 transceiver driver"
+ depends on IEEE802154_DRIVERS && MAC802154
+ depends on SPI
+ ---help---
+ Say Y here to enable the MRF24J20 SPI 802.15.4 wireless
+ controller.
+
+ This driver can also be built as a module. To do so, say M here.
+ the module will be called 'mrf24j40'.
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index ab31544..03e24c5 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -37,7 +37,6 @@
struct at86rf230_local {
struct spi_device *spi;
- int rstn, slp_tr, dig2;
u8 part;
u8 vers;
@@ -53,8 +52,16 @@ struct at86rf230_local {
spinlock_t lock;
bool irq_busy;
bool is_tx;
+ bool tx_aret;
+
+ int rssi_base_val;
};
+static bool is_rf212(struct at86rf230_local *local)
+{
+ return local->part == 7;
+}
+
#define RG_TRX_STATUS (0x01)
#define SR_TRX_STATUS 0x01, 0x1f, 0
#define SR_RESERVED_01_3 0x01, 0x20, 5
@@ -100,7 +107,10 @@ struct at86rf230_local {
#define SR_SFD_VALUE 0x0b, 0xff, 0
#define RG_TRX_CTRL_2 (0x0c)
#define SR_OQPSK_DATA_RATE 0x0c, 0x03, 0
-#define SR_RESERVED_0c_2 0x0c, 0x7c, 2
+#define SR_SUB_MODE 0x0c, 0x04, 2
+#define SR_BPSK_QPSK 0x0c, 0x08, 3
+#define SR_OQPSK_SUB1_RC_EN 0x0c, 0x10, 4
+#define SR_RESERVED_0c_5 0x0c, 0x60, 5
#define SR_RX_SAFE_MODE 0x0c, 0x80, 7
#define RG_ANT_DIV (0x0d)
#define SR_ANT_CTRL 0x0d, 0x03, 0
@@ -145,7 +155,7 @@ struct at86rf230_local {
#define SR_RESERVED_17_5 0x17, 0x08, 3
#define SR_AACK_UPLD_RES_FT 0x17, 0x10, 4
#define SR_AACK_FLTR_RES_FT 0x17, 0x20, 5
-#define SR_RESERVED_17_2 0x17, 0x40, 6
+#define SR_CSMA_LBT_MODE 0x17, 0x40, 6
#define SR_RESERVED_17_1 0x17, 0x80, 7
#define RG_FTN_CTRL (0x18)
#define SR_RESERVED_18_2 0x18, 0x7f, 0
@@ -244,6 +254,57 @@ struct at86rf230_local {
#define STATE_TRANSITION_IN_PROGRESS 0x1F
static int
+__at86rf230_detect_device(struct spi_device *spi, u16 *man_id, u8 *part,
+ u8 *version)
+{
+ u8 data[4];
+ u8 *buf = kmalloc(2, GFP_KERNEL);
+ int status;
+ struct spi_message msg;
+ struct spi_transfer xfer = {
+ .len = 2,
+ .tx_buf = buf,
+ .rx_buf = buf,
+ };
+ u8 reg;
+
+ if (!buf)
+ return -ENOMEM;
+
+ for (reg = RG_PART_NUM; reg <= RG_MAN_ID_1; reg++) {
+ buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
+ buf[1] = 0xff;
+ dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ status = spi_sync(spi, &msg);
+ dev_vdbg(&spi->dev, "status = %d\n", status);
+ if (msg.status)
+ status = msg.status;
+
+ dev_vdbg(&spi->dev, "status = %d\n", status);
+ dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
+ dev_vdbg(&spi->dev, "buf[1] = %02x\n", buf[1]);
+
+ if (status == 0)
+ data[reg - RG_PART_NUM] = buf[1];
+ else
+ break;
+ }
+
+ if (status == 0) {
+ *part = data[0];
+ *version = data[1];
+ *man_id = (data[3] << 8) | data[2];
+ }
+
+ kfree(buf);
+
+ return status;
+}
+
+static int
__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
{
u8 *buf = lp->buf;
@@ -489,7 +550,9 @@ at86rf230_state(struct ieee802154_dev *dev, int state)
} while (val == STATE_TRANSITION_IN_PROGRESS);
- if (val == desired_status)
+ if (val == desired_status ||
+ (desired_status == STATE_RX_ON && val == STATE_BUSY_RX) ||
+ (desired_status == STATE_RX_AACK_ON && val == STATE_BUSY_RX_AACK))
return 0;
pr_err("unexpected state change: %d, asked for %d\n", val, state);
@@ -510,7 +573,11 @@ at86rf230_start(struct ieee802154_dev *dev)
if (rc)
return rc;
- return at86rf230_state(dev, STATE_RX_ON);
+ rc = at86rf230_state(dev, STATE_FORCE_TX_ON);
+ if (rc)
+ return rc;
+
+ return at86rf230_state(dev, STATE_RX_AACK_ON);
}
static void
@@ -520,6 +587,39 @@ at86rf230_stop(struct ieee802154_dev *dev)
}
static int
+at86rf230_set_channel(struct at86rf230_local *lp, int page, int channel)
+{
+ lp->rssi_base_val = -91;
+
+ return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+}
+
+static int
+at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel)
+{
+ int rc;
+
+ if (channel == 0)
+ rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 0);
+ else
+ rc = at86rf230_write_subreg(lp, SR_SUB_MODE, 1);
+ if (rc < 0)
+ return rc;
+
+ if (page == 0) {
+ rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 0);
+ lp->rssi_base_val = -100;
+ } else {
+ rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 1);
+ lp->rssi_base_val = -98;
+ }
+ if (rc < 0)
+ return rc;
+
+ return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+}
+
+static int
at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
{
struct at86rf230_local *lp = dev->priv;
@@ -527,14 +627,22 @@ at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
might_sleep();
- if (page != 0 || channel < 11 || channel > 26) {
+ if (page < 0 || page > 31 ||
+ !(lp->dev->phy->channels_supported[page] & BIT(channel))) {
WARN_ON(1);
return -EINVAL;
}
- rc = at86rf230_write_subreg(lp, SR_CHANNEL, channel);
+ if (is_rf212(lp))
+ rc = at86rf212_set_channel(lp, page, channel);
+ else
+ rc = at86rf230_set_channel(lp, page, channel);
+ if (rc < 0)
+ return rc;
+
msleep(1); /* Wait for PLL */
dev->phy->current_channel = channel;
+ dev->phy->current_page = page;
return 0;
}
@@ -568,6 +676,12 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
if (rc)
goto err_rx;
+ if (lp->tx_aret) {
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ARET_ON);
+ if (rc)
+ goto err_rx;
+ }
+
rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
if (rc)
goto err_rx;
@@ -668,6 +782,93 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
return 0;
}
+static int
+at86rf212_set_txpower(struct ieee802154_dev *dev, int db)
+{
+ struct at86rf230_local *lp = dev->priv;
+
+ /* The typical maximum output is 5dBm with RG_PHY_TX_PWR 0x60; the
+ * lower five bits decrease power in 1dB steps, and 0x60 represents an
+ * extra PA gain of 0dB.
+ * Thus, supported values for db range from -26 to 5, i.e. from 31dB
+ * of reduction down to 0dB.
+ */
+ if (db > 5 || db < -26)
+ return -EINVAL;
+
+ db = -(db - 5);
+
+ return __at86rf230_write(lp, RG_PHY_TX_PWR, 0x60 | db);
+}
+
+static int
+at86rf212_set_lbt(struct ieee802154_dev *dev, bool on)
+{
+ struct at86rf230_local *lp = dev->priv;
+
+ return at86rf230_write_subreg(lp, SR_CSMA_LBT_MODE, on);
+}
+
+static int
+at86rf212_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
+{
+ struct at86rf230_local *lp = dev->priv;
+
+ return at86rf230_write_subreg(lp, SR_CCA_MODE, mode);
+}
+
+static int
+at86rf212_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int desens_steps;
+
+ if (level < lp->rssi_base_val || level > 30)
+ return -EINVAL;
+
+ desens_steps = (level - lp->rssi_base_val) * 100 / 207;
+
+ return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, desens_steps);
+}
+
+static int
+at86rf212_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
+ u8 retries)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc;
+
+ if (min_be > max_be || max_be > 8 || retries > 5)
+ return -EINVAL;
+
+ rc = at86rf230_write_subreg(lp, SR_MIN_BE, min_be);
+ if (rc)
+ return rc;
+
+ rc = at86rf230_write_subreg(lp, SR_MAX_BE, max_be);
+ if (rc)
+ return rc;
+
+ return at86rf230_write_subreg(lp, SR_MAX_CSMA_RETRIES, retries);
+}
+
+static int
+at86rf212_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
+{
+ struct at86rf230_local *lp = dev->priv;
+ int rc = 0;
+
+ if (retries < -1 || retries > 15)
+ return -EINVAL;
+
+ lp->tx_aret = retries >= 0;
+
+ if (retries >= 0)
+ rc = at86rf230_write_subreg(lp, SR_MAX_FRAME_RETRIES, retries);
+
+ return rc;
+}
+
static struct ieee802154_ops at86rf230_ops = {
.owner = THIS_MODULE,
.xmit = at86rf230_xmit,
@@ -678,6 +879,22 @@ static struct ieee802154_ops at86rf230_ops = {
.set_hw_addr_filt = at86rf230_set_hw_addr_filt,
};
+static struct ieee802154_ops at86rf212_ops = {
+ .owner = THIS_MODULE,
+ .xmit = at86rf230_xmit,
+ .ed = at86rf230_ed,
+ .set_channel = at86rf230_channel,
+ .start = at86rf230_start,
+ .stop = at86rf230_stop,
+ .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
+ .set_txpower = at86rf212_set_txpower,
+ .set_lbt = at86rf212_set_lbt,
+ .set_cca_mode = at86rf212_set_cca_mode,
+ .set_cca_ed_level = at86rf212_set_cca_ed_level,
+ .set_csma_params = at86rf212_set_csma_params,
+ .set_frame_retries = at86rf212_set_frame_retries,
+};
+
static void at86rf230_irqwork(struct work_struct *work)
{
struct at86rf230_local *lp =
@@ -752,22 +969,15 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
struct at86rf230_platform_data *pdata = lp->spi->dev.platform_data;
int rc, irq_pol;
u8 status;
+ u8 csma_seed[2];
rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
if (rc)
return rc;
- dev_info(&lp->spi->dev, "Status: %02x\n", status);
- if (status == STATE_P_ON) {
- rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TRX_OFF);
- if (rc)
- return rc;
- msleep(1);
- rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
- if (rc)
- return rc;
- dev_info(&lp->spi->dev, "Status: %02x\n", status);
- }
+ rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
+ if (rc)
+ return rc;
/* configure irq polarity, defaults to high active */
if (pdata->irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
@@ -783,6 +993,14 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
if (rc)
return rc;
+ get_random_bytes(csma_seed, ARRAY_SIZE(csma_seed));
+ rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_0, csma_seed[0]);
+ if (rc)
+ return rc;
+ rc = at86rf230_write_subreg(lp, SR_CSMA_SEED_1, csma_seed[1]);
+ if (rc)
+ return rc;
+
/* CLKM changes are applied immediately */
rc = at86rf230_write_subreg(lp, SR_CLKM_SHA_SEL, 0x00);
if (rc)
@@ -795,16 +1013,6 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
/* Wait the next SLEEP cycle */
msleep(100);
- rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ON);
- if (rc)
- return rc;
- msleep(1);
-
- rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &status);
- if (rc)
- return rc;
- dev_info(&lp->spi->dev, "Status: %02x\n", status);
-
rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &status);
if (rc)
return rc;
@@ -824,26 +1032,18 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
return 0;
}
-static void at86rf230_fill_data(struct spi_device *spi)
-{
- struct at86rf230_local *lp = spi_get_drvdata(spi);
- struct at86rf230_platform_data *pdata = spi->dev.platform_data;
-
- lp->rstn = pdata->rstn;
- lp->slp_tr = pdata->slp_tr;
- lp->dig2 = pdata->dig2;
-}
-
static int at86rf230_probe(struct spi_device *spi)
{
struct at86rf230_platform_data *pdata;
struct ieee802154_dev *dev;
struct at86rf230_local *lp;
- u8 man_id_0, man_id_1, status;
+ u16 man_id = 0;
+ u8 part = 0, version = 0, status;
irq_handler_t irq_handler;
work_func_t irq_worker;
- int rc, supported = 0;
+ int rc;
const char *chip;
+ struct ieee802154_ops *ops = NULL;
if (!spi->irq) {
dev_err(&spi->dev, "no IRQ specified\n");
@@ -856,116 +1056,117 @@ static int at86rf230_probe(struct spi_device *spi)
return -EINVAL;
}
- dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
- if (!dev)
- return -ENOMEM;
-
- lp = dev->priv;
- lp->dev = dev;
-
- lp->spi = spi;
-
- dev->parent = &spi->dev;
- dev->extra_tx_headroom = 0;
- /* We do support only 2.4 Ghz */
- dev->phy->channels_supported[0] = 0x7FFF800;
- dev->flags = IEEE802154_HW_OMIT_CKSUM;
-
- if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
- irq_worker = at86rf230_irqwork;
- irq_handler = at86rf230_isr;
- } else {
- irq_worker = at86rf230_irqwork_level;
- irq_handler = at86rf230_isr_level;
- }
-
- mutex_init(&lp->bmux);
- INIT_WORK(&lp->irqwork, irq_worker);
- spin_lock_init(&lp->lock);
- init_completion(&lp->tx_complete);
-
- spi_set_drvdata(spi, lp);
-
- at86rf230_fill_data(spi);
-
- rc = gpio_request(lp->rstn, "rstn");
+ rc = gpio_request(pdata->rstn, "rstn");
if (rc)
- goto err_rstn;
+ return rc;
- if (gpio_is_valid(lp->slp_tr)) {
- rc = gpio_request(lp->slp_tr, "slp_tr");
+ if (gpio_is_valid(pdata->slp_tr)) {
+ rc = gpio_request(pdata->slp_tr, "slp_tr");
if (rc)
goto err_slp_tr;
}
- rc = gpio_direction_output(lp->rstn, 1);
+ rc = gpio_direction_output(pdata->rstn, 1);
if (rc)
goto err_gpio_dir;
- if (gpio_is_valid(lp->slp_tr)) {
- rc = gpio_direction_output(lp->slp_tr, 0);
+ if (gpio_is_valid(pdata->slp_tr)) {
+ rc = gpio_direction_output(pdata->slp_tr, 0);
if (rc)
goto err_gpio_dir;
}
/* Reset */
msleep(1);
- gpio_set_value(lp->rstn, 0);
+ gpio_set_value(pdata->rstn, 0);
msleep(1);
- gpio_set_value(lp->rstn, 1);
+ gpio_set_value(pdata->rstn, 1);
msleep(1);
- rc = at86rf230_read_subreg(lp, SR_MAN_ID_0, &man_id_0);
- if (rc)
- goto err_gpio_dir;
- rc = at86rf230_read_subreg(lp, SR_MAN_ID_1, &man_id_1);
- if (rc)
+ rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
+ if (rc < 0)
goto err_gpio_dir;
- if (man_id_1 != 0x00 || man_id_0 != 0x1f) {
+ if (man_id != 0x001f) {
dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
- man_id_1, man_id_0);
+ man_id >> 8, man_id & 0xFF);
rc = -EINVAL;
goto err_gpio_dir;
}
- rc = at86rf230_read_subreg(lp, SR_PART_NUM, &lp->part);
- if (rc)
- goto err_gpio_dir;
-
- rc = at86rf230_read_subreg(lp, SR_VERSION_NUM, &lp->vers);
- if (rc)
- goto err_gpio_dir;
-
- switch (lp->part) {
+ switch (part) {
case 2:
chip = "at86rf230";
- /* supported = 1; FIXME: should be easy to support; */
+ /* FIXME: should be easy to support; */
break;
case 3:
chip = "at86rf231";
- supported = 1;
+ ops = &at86rf230_ops;
+ break;
+ case 7:
+ chip = "at86rf212";
+ if (version == 1)
+ ops = &at86rf212_ops;
break;
default:
chip = "UNKNOWN";
break;
}
- dev_info(&spi->dev, "Detected %s chip version %d\n", chip, lp->vers);
- if (!supported) {
+ dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
+ if (!ops) {
rc = -ENOTSUPP;
goto err_gpio_dir;
}
+ dev = ieee802154_alloc_device(sizeof(*lp), ops);
+ if (!dev) {
+ rc = -ENOMEM;
+ goto err_gpio_dir;
+ }
+
+ lp = dev->priv;
+ lp->dev = dev;
+ lp->part = part;
+ lp->vers = version;
+
+ lp->spi = spi;
+
+ dev->parent = &spi->dev;
+ dev->extra_tx_headroom = 0;
+ dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
+
+ if (pdata->irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
+ irq_worker = at86rf230_irqwork;
+ irq_handler = at86rf230_isr;
+ } else {
+ irq_worker = at86rf230_irqwork_level;
+ irq_handler = at86rf230_isr_level;
+ }
+
+ mutex_init(&lp->bmux);
+ INIT_WORK(&lp->irqwork, irq_worker);
+ spin_lock_init(&lp->lock);
+ init_completion(&lp->tx_complete);
+
+ spi_set_drvdata(spi, lp);
+
+ if (is_rf212(lp)) {
+ dev->phy->channels_supported[0] = 0x00007FF;
+ dev->phy->channels_supported[2] = 0x00007FF;
+ } else {
+ dev->phy->channels_supported[0] = 0x7FFF800;
+ }
+
rc = at86rf230_hw_init(lp);
if (rc)
- goto err_gpio_dir;
+ goto err_hw_init;
rc = request_irq(spi->irq, irq_handler,
IRQF_SHARED | pdata->irq_type,
dev_name(&spi->dev), lp);
if (rc)
- goto err_gpio_dir;
+ goto err_hw_init;
/* Read irq status register to reset irq line */
rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
@@ -980,30 +1181,33 @@ static int at86rf230_probe(struct spi_device *spi)
err_irq:
free_irq(spi->irq, lp);
+err_hw_init:
flush_work(&lp->irqwork);
-err_gpio_dir:
- if (gpio_is_valid(lp->slp_tr))
- gpio_free(lp->slp_tr);
-err_slp_tr:
- gpio_free(lp->rstn);
-err_rstn:
+ spi_set_drvdata(spi, NULL);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
+
+err_gpio_dir:
+ if (gpio_is_valid(pdata->slp_tr))
+ gpio_free(pdata->slp_tr);
+err_slp_tr:
+ gpio_free(pdata->rstn);
return rc;
}
static int at86rf230_remove(struct spi_device *spi)
{
struct at86rf230_local *lp = spi_get_drvdata(spi);
+ struct at86rf230_platform_data *pdata = spi->dev.platform_data;
ieee802154_unregister_device(lp->dev);
free_irq(spi->irq, lp);
flush_work(&lp->irqwork);
- if (gpio_is_valid(lp->slp_tr))
- gpio_free(lp->slp_tr);
- gpio_free(lp->rstn);
+ if (gpio_is_valid(pdata->slp_tr))
+ gpio_free(pdata->slp_tr);
+ gpio_free(pdata->rstn);
mutex_destroy(&lp->bmux);
ieee802154_free_device(lp->dev);
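
The at86rf212_set_txpower() helper added above maps a dBm value onto the lower five bits of RG_PHY_TX_PWR. A worked example of that mapping, kept separate from the driver for clarity: db = 5 yields 0x60 (no reduction) and db = -26 yields 0x7f (31dB of reduction), with anything outside [-26, 5] rejected as in the hunk.

#include <linux/errno.h>

static int my_txpower_to_reg(int db)
{
	if (db > 5 || db < -26)
		return -EINVAL;

	return 0x60 | (5 - db);	/* equivalent to 0x60 | -(db - 5) */
}
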
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index c5011e0..282effe 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -136,16 +136,9 @@ static const struct ethtool_ops loopback_ethtool_ops = {
static int loopback_dev_init(struct net_device *dev)
{
- int i;
- dev->lstats = alloc_percpu(struct pcpu_lstats);
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
if (!dev->lstats)
return -ENOMEM;
-
- for_each_possible_cpu(i) {
- struct pcpu_lstats *lb_stats;
- lb_stats = per_cpu_ptr(dev->lstats, i);
- u64_stats_init(&lb_stats->syncp);
- }
return 0;
}
@@ -160,6 +153,7 @@ static const struct net_device_ops loopback_ops = {
.ndo_init = loopback_dev_init,
.ndo_start_xmit= loopback_xmit,
.ndo_get_stats64 = loopback_get_stats64,
+ .ndo_set_mac_address = eth_mac_addr,
};
/*
@@ -174,6 +168,7 @@ static void loopback_setup(struct net_device *dev)
dev->tx_queue_len = 0;
dev->type = ARPHRD_LOOPBACK; /* 0x0001*/
dev->flags = IFF_LOOPBACK;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
@@ -181,6 +176,7 @@ static void loopback_setup(struct net_device *dev)
| NETIF_F_UFO
| NETIF_F_HW_CSUM
| NETIF_F_RXCSUM
+ | NETIF_F_SCTP_CSUM
| NETIF_F_HIGHDMA
| NETIF_F_LLTX
| NETIF_F_NETNS_LOCAL
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 1831fb7..c683ac2 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -537,7 +537,6 @@ static int macvlan_init(struct net_device *dev)
{
struct macvlan_dev *vlan = netdev_priv(dev);
const struct net_device *lowerdev = vlan->lowerdev;
- int i;
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
@@ -549,16 +548,10 @@ static int macvlan_init(struct net_device *dev)
macvlan_set_lockdep_class(dev);
- vlan->pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+ vlan->pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan->pcpu_stats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct vlan_pcpu_stats *mvlstats;
- mvlstats = per_cpu_ptr(vlan->pcpu_stats, i);
- u64_stats_init(&mvlstats->syncp);
- }
-
return 0;
}
diff --git a/drivers/net/nlmon.c b/drivers/net/nlmon.c
index d2bb12b..14ce7de 100644
--- a/drivers/net/nlmon.c
+++ b/drivers/net/nlmon.c
@@ -47,16 +47,7 @@ static int nlmon_change_mtu(struct net_device *dev, int new_mtu)
static int nlmon_dev_init(struct net_device *dev)
{
- int i;
-
- dev->lstats = alloc_percpu(struct pcpu_lstats);
-
- for_each_possible_cpu(i) {
- struct pcpu_lstats *nlmstats;
- nlmstats = per_cpu_ptr(dev->lstats, i);
- u64_stats_init(&nlmstats->syncp);
- }
-
+ dev->lstats = netdev_alloc_pcpu_stats(struct pcpu_lstats);
return dev->lstats == NULL ? -ENOMEM : 0;
}
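
The loopback, macvlan and nlmon hunks above all replace an open-coded alloc_percpu() plus per-CPU u64_stats_init() loop with netdev_alloc_pcpu_stats(). A hedged sketch of using the helper with a driver's own stats type; my_pcpu_stats is illustrative, and the only requirement assumed here is that the struct has a u64_stats_sync member named syncp:

#include <linux/netdevice.h>
#include <linux/u64_stats_sync.h>

struct my_pcpu_stats {
	u64			rx_packets;
	u64			rx_bytes;
	struct u64_stats_sync	syncp;	/* initialised for every CPU */
};

static struct my_pcpu_stats __percpu *my_alloc_stats(void)
{
	/* Allocates the per-CPU area and runs u64_stats_init() on each
	 * CPU's syncp, which is what the removed loops did by hand.
	 */
	return netdev_alloc_pcpu_stats(struct my_pcpu_stats);
}
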
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index 9b5d46c..6a17f92 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -71,6 +71,12 @@ config BCM63XX_PHY
---help---
Currently supports the 6348 and 6358 PHYs.
+config BCM7XXX_PHY
+ tristate "Drivers for Broadcom 7xxx SOCs internal PHYs"
+ ---help---
+ Currently supports the BCM7366, BCM7439, BCM7445, and
+ 40nm and 65nm generation of BCM7xxx Set Top Box SoCs.
+
config BCM87XX_PHY
tristate "Driver for Broadcom BCM8706 and BCM8727 PHYs"
help
diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile
index 9013dfa..07d2402 100644
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -12,6 +12,7 @@ obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o
+obj-$(CONFIG_BCM7XXX_PHY) += bcm7xxx.o
obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o
obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_REALTEK_PHY) += realtek.o
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
new file mode 100644
index 0000000..6973372
--- /dev/null
+++ b/drivers/net/phy/bcm7xxx.c
@@ -0,0 +1,343 @@
+/*
+ * Broadcom BCM7xxx internal transceivers support.
+ *
+ * Copyright (C) 2014, Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/phy.h>
+#include <linux/delay.h>
+#include <linux/bitops.h>
+#include <linux/brcmphy.h>
+
+/* Broadcom BCM7xxx internal PHY registers */
+#define MII_BCM7XXX_CHANNEL_WIDTH 0x2000
+
+/* 40nm only register definitions */
+#define MII_BCM7XXX_100TX_AUX_CTL 0x10
+#define MII_BCM7XXX_100TX_FALSE_CAR 0x13
+#define MII_BCM7XXX_100TX_DISC 0x14
+#define MII_BCM7XXX_AUX_MODE 0x1d
+#define MII_BCM7XX_64CLK_MDIO BIT(12)
+#define MII_BCM7XXX_CORE_BASE1E 0x1e
+#define MII_BCM7XXX_TEST 0x1f
+#define MII_BCM7XXX_SHD_MODE_2 BIT(2)
+
+static int bcm7445_config_init(struct phy_device *phydev)
+{
+ int ret;
+ const struct bcm7445_regs {
+ int reg;
+ u16 value;
+ } bcm7445_regs_cfg[] = {
+ /* increases ADC latency by 24ns */
+ { MII_BCM54XX_EXP_SEL, 0x0038 },
+ { MII_BCM54XX_EXP_DATA, 0xAB95 },
+ /* increases internal 1V LDO voltage by 5% */
+ { MII_BCM54XX_EXP_SEL, 0x2038 },
+ { MII_BCM54XX_EXP_DATA, 0xBB22 },
+ /* reduce RX low pass filter corner frequency */
+ { MII_BCM54XX_EXP_SEL, 0x6038 },
+ { MII_BCM54XX_EXP_DATA, 0xFFC5 },
+ /* reduce RX high pass filter corner frequency */
+ { MII_BCM54XX_EXP_SEL, 0x003a },
+ { MII_BCM54XX_EXP_DATA, 0x2002 },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bcm7445_regs_cfg); i++) {
+ ret = phy_write(phydev,
+ bcm7445_regs_cfg[i].reg,
+ bcm7445_regs_cfg[i].value);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void phy_write_exp(struct phy_device *phydev,
+ u16 reg, u16 value)
+{
+ phy_write(phydev, MII_BCM54XX_EXP_SEL, MII_BCM54XX_EXP_SEL_ER | reg);
+ phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
+}
+
+static void phy_write_misc(struct phy_device *phydev,
+ u16 reg, u16 chl, u16 value)
+{
+ int tmp;
+
+ phy_write(phydev, MII_BCM54XX_AUX_CTL, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+
+ tmp = phy_read(phydev, MII_BCM54XX_AUX_CTL);
+ tmp |= MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA;
+ phy_write(phydev, MII_BCM54XX_AUX_CTL, tmp);
+
+ tmp = (chl * MII_BCM7XXX_CHANNEL_WIDTH) | reg;
+ phy_write(phydev, MII_BCM54XX_EXP_SEL, tmp);
+
+ phy_write(phydev, MII_BCM54XX_EXP_DATA, value);
+}
+
+static int bcm7xxx_28nm_afe_config_init(struct phy_device *phydev)
+{
+ /* write AFE_RXCONFIG_0 */
+ phy_write_misc(phydev, 0x38, 0x0000, 0xeb19);
+
+ /* write AFE_RXCONFIG_1 */
+ phy_write_misc(phydev, 0x38, 0x0001, 0x9a3f);
+
+ /* write AFE_RX_LP_COUNTER */
+ phy_write_misc(phydev, 0x38, 0x0003, 0x7fc7);
+
+ /* write AFE_HPF_TRIM_OTHERS */
+ phy_write_misc(phydev, 0x3A, 0x0000, 0x000b);
+
+ /* write AFE_TX_CONFIG */
+ phy_write_misc(phydev, 0x39, 0x0000, 0x0800);
+
+ /* Increase VCO range to prevent the PLL from unlocking at low
+ * temperature
+ */
+ phy_write_misc(phydev, 0x0032, 0x0001, 0x0048);
+
+ /* Change Ki to 011 */
+ phy_write_misc(phydev, 0x0032, 0x0002, 0x021b);
+
+ /* Disable loading of TVCO buffer to bandgap, set bandgap trim
+ * to 111
+ */
+ phy_write_misc(phydev, 0x0033, 0x0000, 0x0e20);
+
+ /* Adjust bias current trim by -3 */
+ phy_write_misc(phydev, 0x000a, 0x0000, 0x690b);
+
+ /* Switch to CORE_BASE1E */
+ phy_write(phydev, MII_BCM7XXX_CORE_BASE1E, 0xd);
+
+ /* Reset R_CAL/RC_CAL Engine */
+ phy_write_exp(phydev, 0x00b0, 0x0010);
+
+ /* Disable Reset R_CAL/RC_CAL Engine */
+ phy_write_exp(phydev, 0x00b0, 0x0000);
+
+ return 0;
+}
+
+static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = bcm7445_config_init(phydev);
+ if (ret)
+ return ret;
+
+ return bcm7xxx_28nm_afe_config_init(phydev);
+}
+
+static int phy_set_clr_bits(struct phy_device *dev, int location,
+ int set_mask, int clr_mask)
+{
+ int v, ret;
+
+ v = phy_read(dev, location);
+ if (v < 0)
+ return v;
+
+ v &= ~clr_mask;
+ v |= set_mask;
+
+ ret = phy_write(dev, location, v);
+ if (ret < 0)
+ return ret;
+
+ return v;
+}
+
+static int bcm7xxx_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ /* Enable 64 clock MDIO */
+ phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO);
+ phy_read(phydev, MII_BCM7XXX_AUX_MODE);
+
+ /* Workaround only required for 100Mbits/sec */
+ if (!(phydev->dev_flags & PHY_BRCM_100MBPS_WAR))
+ return 0;
+
+ /* set shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
+ MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2);
+ if (ret < 0)
+ return ret;
+
+ /* set iddq_clkbias */
+ phy_write(phydev, MII_BCM7XXX_100TX_DISC, 0x0F00);
+ udelay(10);
+
+ /* reset iddq_clkbias */
+ phy_write(phydev, MII_BCM7XXX_100TX_DISC, 0x0C00);
+
+ phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555);
+
+ /* reset shadow mode 2 */
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, MII_BCM7XXX_SHD_MODE_2, 0);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+/* Workaround for putting the PHY in IDDQ mode, required
+ * for all BCM7XXX PHYs
+ */
+static int bcm7xxx_suspend(struct phy_device *phydev)
+{
+ int ret;
+ const struct bcm7xxx_regs {
+ int reg;
+ u16 value;
+ } bcm7xxx_suspend_cfg[] = {
+ { MII_BCM7XXX_TEST, 0x008b },
+ { MII_BCM7XXX_100TX_AUX_CTL, 0x01c0 },
+ { MII_BCM7XXX_100TX_DISC, 0x7000 },
+ { MII_BCM7XXX_TEST, 0x000f },
+ { MII_BCM7XXX_100TX_AUX_CTL, 0x20d0 },
+ { MII_BCM7XXX_TEST, 0x000b },
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(bcm7xxx_suspend_cfg); i++) {
+ ret = phy_write(phydev,
+ bcm7xxx_suspend_cfg[i].reg,
+ bcm7xxx_suspend_cfg[i].value);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static struct phy_driver bcm7xxx_driver[] = {
+{
+ .phy_id = PHY_ID_BCM7366,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM7366",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_afe_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_afe_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_ID_BCM7439,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM7439",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_afe_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_afe_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_ID_BCM7445,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM7445",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .name = "Broadcom BCM7XXX 28nm",
+ .phy_id = PHY_ID_BCM7XXX_28,
+ .phy_id_mask = PHY_BCM_OUI_MASK,
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_28nm_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_28nm_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_BCM_OUI_4,
+ .phy_id_mask = 0xffff0000,
+ .name = "Broadcom BCM7XXX 40nm",
+ .features = PHY_GBIT_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_config_init,
+ .driver = { .owner = THIS_MODULE },
+}, {
+ .phy_id = PHY_BCM_OUI_5,
+ .phy_id_mask = 0xffffff00,
+ .name = "Broadcom BCM7XXX 65nm",
+ .features = PHY_BASIC_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_IS_INTERNAL,
+ .config_init = bcm7xxx_dummy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .suspend = bcm7xxx_suspend,
+ .resume = bcm7xxx_config_init,
+ .driver = { .owner = THIS_MODULE },
+} };
+
+static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
+ { PHY_ID_BCM7366, 0xfffffff0, },
+ { PHY_ID_BCM7439, 0xfffffff0, },
+ { PHY_ID_BCM7445, 0xfffffff0, },
+ { PHY_ID_BCM7XXX_28, 0xfffffc00 },
+ { PHY_BCM_OUI_4, 0xffff0000 },
+ { PHY_BCM_OUI_5, 0xffffff00 },
+ { }
+};
+
+static int __init bcm7xxx_phy_init(void)
+{
+ return phy_drivers_register(bcm7xxx_driver,
+ ARRAY_SIZE(bcm7xxx_driver));
+}
+
+static void __exit bcm7xxx_phy_exit(void)
+{
+ phy_drivers_unregister(bcm7xxx_driver,
+ ARRAY_SIZE(bcm7xxx_driver));
+}
+
+module_init(bcm7xxx_phy_init);
+module_exit(bcm7xxx_phy_exit);
+
+MODULE_DEVICE_TABLE(mdio, bcm7xxx_tbl);
+
+MODULE_DESCRIPTION("Broadcom BCM7xxx internal PHY driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Broadcom Corporation");
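
bcm7xxx.c above only ever writes expansion registers through phy_write_exp(). For illustration, a read counterpart would use the same MII_BCM54XX_EXP_SEL indirection and then read MII_BCM54XX_EXP_DATA back; the helper below is a hypothetical sketch and is not part of the patch:

/* Hypothetical counterpart to phy_write_exp(): select the expansion
 * register, then read its data word.
 */
static int phy_read_exp(struct phy_device *phydev, u16 reg)
{
	int ret;

	ret = phy_write(phydev, MII_BCM54XX_EXP_SEL,
			MII_BCM54XX_EXP_SEL_ER | reg);
	if (ret < 0)
		return ret;

	return phy_read(phydev, MII_BCM54XX_EXP_DATA);
}
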
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f8c90ea..34088d6 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -25,58 +25,6 @@
#define BRCM_PHY_REV(phydev) \
((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
-
-#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
-#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
-#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
-
-#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
-#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
-
-#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
-#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
-#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
-#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
-
-#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
-#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
-#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */
-#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */
-#define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */
-#define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */
-#define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */
-#define MII_BCM54XX_INT_LRS 0x0010 /* Local receiver status changed */
-#define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */
-#define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */
-#define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */
-#define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */
-#define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */
-#define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */
-#define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */
-#define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */
-#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */
-#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */
-
-#define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */
-#define MII_BCM54XX_SHD_WRITE 0x8000
-#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
-#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
-
-/*
- * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
- */
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
-#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
-#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
-
-#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
-#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
-#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
-
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
-
-
/*
* Broadcom LED source encodings. These are used in BCM5461, BCM5481,
* BCM5482, and possibly some others.
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 71e4900..76f54b3 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -432,8 +432,28 @@ phy_id_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(phy_id);
+static ssize_t
+phy_interface_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ return sprintf(buf, "%s\n", phy_modes(phydev->interface));
+}
+static DEVICE_ATTR_RO(phy_interface);
+
+static ssize_t
+phy_has_fixups_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct phy_device *phydev = to_phy_device(dev);
+
+ return sprintf(buf, "%d\n", phydev->has_fixups);
+}
+static DEVICE_ATTR_RO(phy_has_fixups);
+
static struct attribute *mdio_dev_attrs[] = {
&dev_attr_phy_id.attr,
+ &dev_attr_phy_interface.attr,
+ &dev_attr_phy_has_fixups.attr,
NULL,
};
ATTRIBUTE_GROUPS(mdio_dev);
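
The new phy_interface and phy_has_fixups attributes appear next to the existing phy_id file in the MDIO device's sysfs directory. A minimal user-space reader could look like the sketch below; the device directory name is an assumption and depends on the MDIO bus id and PHY address:

/* Example only: print the reported PHY interface mode. */
#include <stdio.h>

int main(void)
{
	/* Directory name varies per board; "stmmac-0:00" is made up. */
	FILE *f = fopen("/sys/bus/mdio_bus/devices/stmmac-0:00/phy_interface",
			"r");
	char buf[32];

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("PHY interface mode: %s", buf);
	fclose(f);
	return 0;
}
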
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 5a8993b..0c9e434 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -148,15 +148,52 @@ static int ks8737_config_intr(struct phy_device *phydev)
return rc < 0 ? rc : 0;
}
+static int kszphy_setup_led(struct phy_device *phydev,
+ unsigned int reg, unsigned int shift)
+{
+
+ struct device *dev = &phydev->dev;
+ struct device_node *of_node = dev->of_node;
+ int rc, temp;
+ u32 val;
+
+ if (!of_node && dev->parent->of_node)
+ of_node = dev->parent->of_node;
+
+ if (of_property_read_u32(of_node, "micrel,led-mode", &val))
+ return 0;
+
+ temp = phy_read(phydev, reg);
+ if (temp < 0)
+ return temp;
+
+ temp &= 3 << shift;
+ temp |= val << shift;
+ rc = phy_write(phydev, reg, temp);
+
+ return rc < 0 ? rc : 0;
+}
+
static int kszphy_config_init(struct phy_device *phydev)
{
return 0;
}
+static int kszphy_config_init_led8041(struct phy_device *phydev)
+{
+ /* single led control, register 0x1e bits 15..14 */
+ return kszphy_setup_led(phydev, 0x1e, 14);
+}
+
static int ksz8021_config_init(struct phy_device *phydev)
{
- int rc;
const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
+ int rc;
+
+ rc = kszphy_setup_led(phydev, 0x1f, 4);
+ if (rc)
+ dev_err(&phydev->dev, "failed to set led mode\n");
+
phy_write(phydev, MII_KSZPHY_OMSO, val);
rc = ksz_config_flags(phydev);
return rc < 0 ? rc : 0;
@@ -166,6 +203,10 @@ static int ks8051_config_init(struct phy_device *phydev)
{
int rc;
+ rc = kszphy_setup_led(phydev, 0x1f, 4);
+ if (rc)
+ dev_err(&phydev->dev, "failed to set led mode\n");
+
rc = ksz_config_flags(phydev);
return rc < 0 ? rc : 0;
}
@@ -327,7 +368,7 @@ static struct phy_driver ksphy_driver[] = {
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause
| SUPPORTED_Asym_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = kszphy_config_init_led8041,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -342,7 +383,7 @@ static struct phy_driver ksphy_driver[] = {
.features = PHY_BASIC_FEATURES |
SUPPORTED_Pause | SUPPORTED_Asym_Pause,
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = kszphy_config_init_led8041,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -371,7 +412,7 @@ static struct phy_driver ksphy_driver[] = {
.phy_id_mask = 0x00ffffff,
.features = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = kszphy_config_init_led8041,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 19c9eca..643b5d6 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -38,6 +38,26 @@
#include <asm/irq.h>
+static const char *phy_speed_to_str(int speed)
+{
+ switch (speed) {
+ case SPEED_10:
+ return "10Mbps";
+ case SPEED_100:
+ return "100Mbps";
+ case SPEED_1000:
+ return "1Gbps";
+ case SPEED_2500:
+ return "2.5Gbps";
+ case SPEED_10000:
+ return "10Gbps";
+ case SPEED_UNKNOWN:
+ return "Unknown";
+ default:
+ return "Unsupported (update phy.c)";
+ }
+}
+
/**
* phy_print_status - Convenience function to print out the current phy status
* @phydev: the phy_device struct
@@ -45,12 +65,13 @@
void phy_print_status(struct phy_device *phydev)
{
if (phydev->link) {
- pr_info("%s - Link is Up - %d/%s\n",
- dev_name(&phydev->dev),
- phydev->speed,
- DUPLEX_FULL == phydev->duplex ? "Full" : "Half");
+ netdev_info(phydev->attached_dev,
+ "Link is Up - %s/%s - flow control %s\n",
+ phy_speed_to_str(phydev->speed),
+ DUPLEX_FULL == phydev->duplex ? "Full" : "Half",
+ phydev->pause ? "rx/tx" : "off");
} else {
- pr_info("%s - Link is Down\n", dev_name(&phydev->dev));
+ netdev_info(phydev->attached_dev, "Link is Down\n");
}
}
EXPORT_SYMBOL(phy_print_status);
@@ -62,7 +83,7 @@ EXPORT_SYMBOL(phy_print_status);
* If the @phydev driver has an ack_interrupt function, call it to
* ack and clear the phy device's interrupt.
*
- * Returns 0 on success on < 0 on error.
+ * Returns 0 on success or < 0 on error.
*/
static int phy_clear_interrupt(struct phy_device *phydev)
{
@@ -77,7 +98,7 @@ static int phy_clear_interrupt(struct phy_device *phydev)
* @phydev: the phy_device struct
* @interrupts: interrupt flags to configure for this @phydev
*
- * Returns 0 on success on < 0 on error.
+ * Returns 0 on success or < 0 on error.
*/
static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
{
@@ -93,15 +114,16 @@ static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts)
* phy_aneg_done - return auto-negotiation status
* @phydev: target phy_device struct
*
- * Description: Reads the status register and returns 0 either if
- * auto-negotiation is incomplete, or if there was an error.
- * Returns BMSR_ANEGCOMPLETE if auto-negotiation is done.
+ * Description: Return the auto-negotiation status for this @phydev.
+ * Returns > 0 on success or < 0 on error. 0 means that auto-negotiation
+ * is still pending.
*/
static inline int phy_aneg_done(struct phy_device *phydev)
{
- int retval = phy_read(phydev, MII_BMSR);
+ if (phydev->drv->aneg_done)
+ return phydev->drv->aneg_done(phydev);
- return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+ return genphy_aneg_done(phydev);
}
/* A structure for mapping a particular speed and duplex
@@ -283,7 +305,10 @@ int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd)
ethtool_cmd_speed_set(cmd, phydev->speed);
cmd->duplex = phydev->duplex;
- cmd->port = PORT_MII;
+ if (phydev->interface == PHY_INTERFACE_MODE_MOCA)
+ cmd->port = PORT_BNC;
+ else
+ cmd->port = PORT_MII;
cmd->phy_address = phydev->addr;
cmd->transceiver = phy_is_internal(phydev) ?
XCVR_INTERNAL : XCVR_EXTERNAL;
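
With the phy_aneg_done() change above, a PHY driver can report auto-negotiation completion itself instead of relying on the generic BMSR poll. A hypothetical driver hooking the new aneg_done callback might look roughly like the following; the register name, bit, and PHY ID are invented for illustration:

/* Hypothetical .aneg_done implementation using a vendor status
 * register; EXAMPLE_STATUS/EXAMPLE_AN_DONE are made-up names.
 */
#define EXAMPLE_STATUS		0x19
#define EXAMPLE_AN_DONE		BIT(5)

static int example_aneg_done(struct phy_device *phydev)
{
	int val = phy_read(phydev, EXAMPLE_STATUS);

	return (val < 0) ? val : (val & EXAMPLE_AN_DONE);
}

static struct phy_driver example_driver = {
	.phy_id		= 0x01234560,	/* made up */
	.phy_id_mask	= 0xfffffff0,
	.name		= "Example PHY",
	.features	= PHY_GBIT_FEATURES,
	.config_aneg	= genphy_config_aneg,
	.aneg_done	= example_aneg_done,
	.read_status	= genphy_read_status,
	.driver		= { .owner = THIS_MODULE },
};
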
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4b970f7..a2fbb3e 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -139,6 +139,7 @@ static int phy_scan_fixups(struct phy_device *phydev)
mutex_unlock(&phy_fixup_lock);
return err;
}
+ phydev->has_fixups = true;
}
}
mutex_unlock(&phy_fixup_lock);
@@ -534,16 +535,16 @@ static int phy_poll_reset(struct phy_device *phydev)
int phy_init_hw(struct phy_device *phydev)
{
- int ret;
+ int ret = 0;
if (!phydev->drv || !phydev->drv->config_init)
return 0;
- ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
- if (ret < 0)
- return ret;
+ if (phydev->drv->soft_reset)
+ ret = phydev->drv->soft_reset(phydev);
+ else
+ ret = genphy_soft_reset(phydev);
- ret = phy_poll_reset(phydev);
if (ret < 0)
return ret;
@@ -865,6 +866,22 @@ int genphy_config_aneg(struct phy_device *phydev)
}
EXPORT_SYMBOL(genphy_config_aneg);
+/**
+ * genphy_aneg_done - return auto-negotiation status
+ * @phydev: target phy_device struct
+ *
+ * Description: Reads the status register and returns 0 if
+ * auto-negotiation is incomplete, BMSR_ANEGCOMPLETE if it has
+ * completed, or a negative errno if the register read fails.
+ */
+int genphy_aneg_done(struct phy_device *phydev)
+{
+ int retval = phy_read(phydev, MII_BMSR);
+
+ return (retval < 0) ? retval : (retval & BMSR_ANEGCOMPLETE);
+}
+EXPORT_SYMBOL(genphy_aneg_done);
+
static int gen10g_config_aneg(struct phy_device *phydev)
{
return 0;
@@ -1030,6 +1047,27 @@ static int gen10g_read_status(struct phy_device *phydev)
return 0;
}
+/**
+ * genphy_soft_reset - software reset the PHY via BMCR_RESET bit
+ * @phydev: target phy_device struct
+ *
+ * Description: Perform a software PHY reset using the standard
+ * BMCR_RESET bit and poll for the reset bit to be cleared.
+ *
+ * Returns: 0 on success, < 0 on failure
+ */
+int genphy_soft_reset(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write(phydev, MII_BMCR, BMCR_RESET);
+ if (ret < 0)
+ return ret;
+
+ return phy_poll_reset(phydev);
+}
+EXPORT_SYMBOL(genphy_soft_reset);
+
static int genphy_config_init(struct phy_device *phydev)
{
int val;
@@ -1076,6 +1114,12 @@ static int genphy_config_init(struct phy_device *phydev)
return 0;
}
+static int gen10g_soft_reset(struct phy_device *phydev)
+{
+ /* Do nothing for now */
+ return 0;
+}
+
static int gen10g_config_init(struct phy_device *phydev)
{
/* Temporarily just say we support everything */
@@ -1250,9 +1294,11 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
+ .soft_reset = genphy_soft_reset,
.config_init = genphy_config_init,
.features = 0,
.config_aneg = genphy_config_aneg,
+ .aneg_done = genphy_aneg_done,
.read_status = genphy_read_status,
.suspend = genphy_suspend,
.resume = genphy_resume,
@@ -1261,6 +1307,7 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic 10G PHY",
+ .soft_reset = gen10g_soft_reset,
.config_init = gen10g_config_init,
.features = 0,
.config_aneg = gen10g_config_aneg,
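
phy_init_hw() now defers the reset to a driver-provided soft_reset() callback when one exists, falling back to the exported genphy_soft_reset(); gen10g_soft_reset() above is the no-op case. A driver that needs extra work around the standard BMCR reset could wrap the generic helper as in this hypothetical sketch (register names and values are invented):

#define EXAMPLE_VENDOR_REG	0x17	/* made up */
#define EXAMPLE_OVERRIDE_BIT	0x0100	/* made up */

/* Hypothetical .soft_reset: do the standard BMCR reset, then restore a
 * vendor override bit that the reset would clear.
 */
static int example_soft_reset(struct phy_device *phydev)
{
	int ret = genphy_soft_reset(phydev);

	if (ret < 0)
		return ret;

	return phy_write(phydev, EXAMPLE_VENDOR_REG, EXAMPLE_OVERRIDE_BIT);
}

Such a driver would then set .soft_reset = example_soft_reset in its struct phy_driver, just as the generic drivers above wire up genphy_soft_reset and gen10g_soft_reset.
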
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index c8624a8..aea92f0 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1540,16 +1540,10 @@ static int team_init(struct net_device *dev)
mutex_init(&team->lock);
team_set_no_mode(team);
- team->pcpu_stats = alloc_percpu(struct team_pcpu_stats);
+ team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
if (!team->pcpu_stats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct team_pcpu_stats *team_stats;
- team_stats = per_cpu_ptr(team->pcpu_stats, i);
- u64_stats_init(&team_stats->syncp);
- }
-
for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
INIT_HLIST_HEAD(&team->en_port_hlist[i]);
INIT_LIST_HEAD(&team->port_list);
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index d89dbe3..0654bd3 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -23,7 +23,7 @@
#include <linux/ipv6.h>
/* Version Information */
-#define DRIVER_VERSION "v1.04.0 (2014/01/15)"
+#define DRIVER_VERSION "v1.05.0 (2014/02/18)"
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
#define MODULENAME "r8152"
@@ -62,6 +62,8 @@
#define PLA_RSTTELLY 0xe800
#define PLA_CR 0xe813
#define PLA_CRWECR 0xe81c
+#define PLA_CONFIG12 0xe81e /* CONFIG1, CONFIG2 */
+#define PLA_CONFIG34 0xe820 /* CONFIG3, CONFIG4 */
#define PLA_CONFIG5 0xe822
#define PLA_PHY_PWR 0xe84c
#define PLA_OOB_CTRL 0xe84f
@@ -216,7 +218,14 @@
/* PAL_BDC_CR */
#define ALDPS_PROXY_MODE 0x0001
+/* PLA_CONFIG34 */
+#define LINK_ON_WAKE_EN 0x0010
+#define LINK_OFF_WAKE_EN 0x0008
+
/* PLA_CONFIG5 */
+#define BWF_EN 0x0040
+#define MWF_EN 0x0020
+#define UWF_EN 0x0010
#define LAN_WAKE_EN 0x0002
/* PLA_LED_FEATURE */
@@ -436,6 +445,8 @@ enum rtl8152_flags {
RTL8152_SET_RX_MODE,
WORK_ENABLE,
RTL8152_LINK_CHG,
+ SELECTIVE_SUSPEND,
+ PHY_RESET,
};
/* Define these values to match your device */
@@ -514,11 +525,13 @@ struct r8152 {
void (*init)(struct r8152 *);
int (*enable)(struct r8152 *);
void (*disable)(struct r8152 *);
+ void (*up)(struct r8152 *);
void (*down)(struct r8152 *);
void (*unload)(struct r8152 *);
} rtl_ops;
int intr_interval;
+ u32 saved_wolopts;
u32 msg_enable;
u32 tx_qlen;
u16 ocp_base;
@@ -865,11 +878,21 @@ static u16 sram_read(struct r8152 *tp, u16 addr)
static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
{
struct r8152 *tp = netdev_priv(netdev);
+ int ret;
if (phy_id != R8152_PHY_ID)
return -EINVAL;
- return r8152_mdio_read(tp, reg);
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out;
+
+ ret = r8152_mdio_read(tp, reg);
+
+ usb_autopm_put_interface(tp->intf);
+
+out:
+ return ret;
}
static
@@ -880,7 +903,12 @@ void write_mii_word(struct net_device *netdev, int phy_id, int reg, int val)
if (phy_id != R8152_PHY_ID)
return;
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
r8152_mdio_write(tp, reg, val);
+
+ usb_autopm_put_interface(tp->intf);
}
static
@@ -889,11 +917,26 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags);
static inline void set_ethernet_addr(struct r8152 *tp)
{
struct net_device *dev = tp->netdev;
+ int ret;
u8 node_id[8] = {0};
- if (pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id) < 0)
+ if (tp->version == RTL_VER_01)
+ ret = pla_ocp_read(tp, PLA_IDR, sizeof(node_id), node_id);
+ else
+ ret = pla_ocp_read(tp, PLA_BACKUP, sizeof(node_id), node_id);
+
+ if (ret < 0) {
netif_notice(tp, probe, dev, "inet addr fail\n");
- else {
+ } else {
+ if (tp->version != RTL_VER_01) {
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
+ CRWECR_CONFIG);
+ pla_ocp_write(tp, PLA_IDR, BYTE_EN_SIX_BYTES,
+ sizeof(node_id), node_id);
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR,
+ CRWECR_NORAML);
+ }
+
memcpy(dev->dev_addr, node_id, dev->addr_len);
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
}
@@ -951,6 +994,8 @@ static void read_bulk_callback(struct urb *urb)
if (!netif_carrier_ok(netdev))
return;
+ usb_mark_last_busy(tp->udev);
+
switch (status) {
case 0:
if (urb->actual_length < ETH_ZLEN)
@@ -1018,6 +1063,8 @@ static void write_bulk_callback(struct urb *urb)
list_add_tail(&agg->list, &tp->tx_free);
spin_unlock_irqrestore(&tp->tx_lock, flags);
+ usb_autopm_put_interface_async(tp->intf);
+
if (!netif_carrier_ok(tp->netdev))
return;
@@ -1028,7 +1075,7 @@ static void write_bulk_callback(struct urb *urb)
return;
if (!skb_queue_empty(&tp->tx_queue))
- tasklet_schedule(&tp->tl);
+ schedule_delayed_work(&tp->schedule, 0);
}
static void intr_callback(struct urb *urb)
@@ -1284,9 +1331,16 @@ r8152_tx_csum(struct r8152 *tp, struct tx_desc *desc, struct sk_buff *skb)
static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
{
- int remain;
+ struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
+ unsigned long flags;
+ int remain, ret;
u8 *tx_data;
+ __skb_queue_head_init(&skb_head);
+ spin_lock_irqsave(&tx_queue->lock, flags);
+ skb_queue_splice_init(tx_queue, &skb_head);
+ spin_unlock_irqrestore(&tx_queue->lock, flags);
+
tx_data = agg->head;
agg->skb_num = agg->skb_len = 0;
remain = rx_buf_sz;
@@ -1296,14 +1350,14 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
struct sk_buff *skb;
unsigned int len;
- skb = skb_dequeue(&tp->tx_queue);
+ skb = __skb_dequeue(&skb_head);
if (!skb)
break;
remain -= sizeof(*tx_desc);
len = skb->len;
if (remain < len) {
- skb_queue_head(&tp->tx_queue, skb);
+ __skb_queue_head(&skb_head, skb);
break;
}
@@ -1321,28 +1375,50 @@ static int r8152_tx_agg_fill(struct r8152 *tp, struct tx_agg *agg)
remain = rx_buf_sz - (int)(tx_agg_align(tx_data) - agg->head);
}
- netif_tx_lock(tp->netdev);
+ if (!skb_queue_empty(&skb_head)) {
+ spin_lock_irqsave(&tx_queue->lock, flags);
+ skb_queue_splice(&skb_head, tx_queue);
+ spin_unlock_irqrestore(&tx_queue->lock, flags);
+ }
+
+ netif_tx_lock_bh(tp->netdev);
if (netif_queue_stopped(tp->netdev) &&
skb_queue_len(&tp->tx_queue) < tp->tx_qlen)
netif_wake_queue(tp->netdev);
- netif_tx_unlock(tp->netdev);
+ netif_tx_unlock_bh(tp->netdev);
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out_tx_fill;
usb_fill_bulk_urb(agg->urb, tp->udev, usb_sndbulkpipe(tp->udev, 2),
agg->head, (int)(tx_data - (u8 *)agg->head),
(usb_complete_t)write_bulk_callback, agg);
- return usb_submit_urb(agg->urb, GFP_ATOMIC);
+ ret = usb_submit_urb(agg->urb, GFP_KERNEL);
+ if (ret < 0)
+ usb_autopm_put_interface(tp->intf);
+
+out_tx_fill:
+ return ret;
}
static void rx_bottom(struct r8152 *tp)
{
unsigned long flags;
- struct list_head *cursor, *next;
+ struct list_head *cursor, *next, rx_queue;
+ if (list_empty(&tp->rx_done))
+ return;
+
+ INIT_LIST_HEAD(&rx_queue);
spin_lock_irqsave(&tp->rx_lock, flags);
- list_for_each_safe(cursor, next, &tp->rx_done) {
+ list_splice_init(&tp->rx_done, &rx_queue);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
+
+ list_for_each_safe(cursor, next, &rx_queue) {
struct rx_desc *rx_desc;
struct rx_agg *agg;
int len_used = 0;
@@ -1351,7 +1427,6 @@ static void rx_bottom(struct r8152 *tp)
int ret;
list_del_init(cursor);
- spin_unlock_irqrestore(&tp->rx_lock, flags);
agg = list_entry(cursor, struct rx_agg, list);
urb = agg->urb;
@@ -1389,7 +1464,7 @@ static void rx_bottom(struct r8152 *tp)
memcpy(skb->data, rx_data, pkt_len);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, netdev);
- netif_rx(skb);
+ netif_receive_skb(skb);
stats->rx_packets++;
stats->rx_bytes += pkt_len;
@@ -1401,13 +1476,13 @@ static void rx_bottom(struct r8152 *tp)
submit:
ret = r8152_submit_rx(tp, agg, GFP_ATOMIC);
- spin_lock_irqsave(&tp->rx_lock, flags);
if (ret && ret != -ENODEV) {
- list_add_tail(&agg->list, next);
+ spin_lock_irqsave(&tp->rx_lock, flags);
+ list_add_tail(&agg->list, &tp->rx_done);
+ spin_unlock_irqrestore(&tp->rx_lock, flags);
tasklet_schedule(&tp->tl);
}
}
- spin_unlock_irqrestore(&tp->rx_lock, flags);
}
static void tx_bottom(struct r8152 *tp)
@@ -1465,7 +1540,6 @@ static void bottom_half(unsigned long data)
return;
rx_bottom(tp);
- tx_bottom(tp);
}
static
@@ -1478,6 +1552,27 @@ int r8152_submit_rx(struct r8152 *tp, struct rx_agg *agg, gfp_t mem_flags)
return usb_submit_urb(agg->urb, mem_flags);
}
+static void rtl_drop_queued_tx(struct r8152 *tp)
+{
+ struct net_device_stats *stats = &tp->netdev->stats;
+ struct sk_buff_head skb_head, *tx_queue = &tp->tx_queue;
+ unsigned long flags;
+ struct sk_buff *skb;
+
+ if (skb_queue_empty(tx_queue))
+ return;
+
+ __skb_queue_head_init(&skb_head);
+ spin_lock_irqsave(&tx_queue->lock, flags);
+ skb_queue_splice_init(tx_queue, &skb_head);
+ spin_unlock_irqrestore(&tx_queue->lock, flags);
+
+ while ((skb = __skb_dequeue(&skb_head))) {
+ dev_kfree_skb(skb);
+ stats->tx_dropped++;
+ }
+}
+
static void rtl8152_tx_timeout(struct net_device *netdev)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -1554,7 +1649,7 @@ static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
netif_stop_queue(netdev);
if (!list_empty(&tp->tx_free))
- tasklet_schedule(&tp->tl);
+ schedule_delayed_work(&tp->schedule, 0);
return NETDEV_TX_OK;
}
@@ -1613,6 +1708,18 @@ static void rtl_set_eee_plus(struct r8152 *tp)
}
}
+static void rxdy_gated_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
+ if (enable)
+ ocp_data |= RXDY_GATED_EN;
+ else
+ ocp_data &= ~RXDY_GATED_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+}
+
static int rtl_enable(struct r8152 *tp)
{
u32 ocp_data;
@@ -1624,9 +1731,7 @@ static int rtl_enable(struct r8152 *tp)
ocp_data |= CR_RE | CR_TE;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, ocp_data);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data &= ~RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, false);
INIT_LIST_HEAD(&tp->rx_done);
ret = 0;
@@ -1681,8 +1786,6 @@ static int rtl8153_enable(struct r8152 *tp)
static void rtl8152_disable(struct r8152 *tp)
{
- struct net_device_stats *stats = rtl8152_get_stats(tp->netdev);
- struct sk_buff *skb;
u32 ocp_data;
int i;
@@ -1690,17 +1793,12 @@ static void rtl8152_disable(struct r8152 *tp)
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- while ((skb = skb_dequeue(&tp->tx_queue))) {
- dev_kfree_skb(skb);
- stats->tx_dropped++;
- }
+ rtl_drop_queued_tx(tp);
for (i = 0; i < RTL8152_MAX_TX; i++)
usb_kill_urb(tp->tx_info[i].urb);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data |= RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, true);
for (i = 0; i < 1000; i++) {
ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL);
@@ -1721,6 +1819,198 @@ static void rtl8152_disable(struct r8152 *tp)
rtl8152_nic_reset(tp);
}
+static void r8152_power_cut_en(struct r8152 *tp, bool enable)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
+ if (enable)
+ ocp_data |= POWER_CUT;
+ else
+ ocp_data &= ~POWER_CUT;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
+ ocp_data &= ~RESUME_INDICATE;
+ ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+
+}
+
+#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
+
+static u32 __rtl_get_wol(struct r8152 *tp)
+{
+ u32 ocp_data;
+ u32 wolopts = 0;
+
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+ if (!(ocp_data & LAN_WAKE_EN))
+ return 0;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ if (ocp_data & LINK_ON_WAKE_EN)
+ wolopts |= WAKE_PHY;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+ if (ocp_data & UWF_EN)
+ wolopts |= WAKE_UCAST;
+ if (ocp_data & BWF_EN)
+ wolopts |= WAKE_BCAST;
+ if (ocp_data & MWF_EN)
+ wolopts |= WAKE_MCAST;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+ if (ocp_data & MAGIC_EN)
+ wolopts |= WAKE_MAGIC;
+
+ return wolopts;
+}
+
+static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
+{
+ u32 ocp_data;
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data &= ~LINK_ON_WAKE_EN;
+ if (wolopts & WAKE_PHY)
+ ocp_data |= LINK_ON_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG5);
+ ocp_data &= ~(UWF_EN | BWF_EN | MWF_EN | LAN_WAKE_EN);
+ if (wolopts & WAKE_UCAST)
+ ocp_data |= UWF_EN;
+ if (wolopts & WAKE_BCAST)
+ ocp_data |= BWF_EN;
+ if (wolopts & WAKE_MCAST)
+ ocp_data |= MWF_EN;
+ if (wolopts & WAKE_ANY)
+ ocp_data |= LAN_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG5, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
+ ocp_data &= ~MAGIC_EN;
+ if (wolopts & WAKE_MAGIC)
+ ocp_data |= MAGIC_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
+
+ if (wolopts & WAKE_ANY)
+ device_set_wakeup_enable(&tp->udev->dev, true);
+ else
+ device_set_wakeup_enable(&tp->udev->dev, false);
+}
+
+static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
+{
+ if (enable) {
+ u32 ocp_data;
+
+ __rtl_set_wol(tp, WAKE_ANY);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CONFIG34);
+ ocp_data |= LINK_OFF_WAKE_EN;
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_CONFIG34, ocp_data);
+
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
+ } else {
+ __rtl_set_wol(tp, tp->saved_wolopts);
+ }
+}
+
+static void rtl_phy_reset(struct r8152 *tp)
+{
+ u16 data;
+ int i;
+
+ clear_bit(PHY_RESET, &tp->flags);
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+
+ /* don't reset again before the previous one completes */
+ if (data & BMCR_RESET)
+ return;
+
+ data |= BMCR_RESET;
+ r8152_mdio_write(tp, MII_BMCR, data);
+
+ for (i = 0; i < 50; i++) {
+ msleep(20);
+ if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
+ break;
+ }
+}
+
+static void rtl_clear_bp(struct r8152 *tp)
+{
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
+ ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
+ mdelay(3);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
+ ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
+}
+
+static void r8153_clear_bp(struct r8152 *tp)
+{
+ ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
+ ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
+ rtl_clear_bp(tp);
+}
+
+static void r8153_teredo_off(struct r8152 *tp)
+{
+ u32 ocp_data;
+
+ ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
+ ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
+
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
+ ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
+}
+
+static void r8152b_disable_aldps(struct r8152 *tp)
+{
+ ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
+ msleep(20);
+}
+
+static inline void r8152b_enable_aldps(struct r8152 *tp)
+{
+ ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
+ LINKENA | DIS_SDSAVE);
+}
+
+static void r8152b_hw_phy_cfg(struct r8152 *tp)
+{
+ u16 data;
+
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ r8152b_disable_aldps(tp);
+
+ rtl_clear_bp(tp);
+
+ r8152b_enable_aldps(tp);
+ set_bit(PHY_RESET, &tp->flags);
+}
+
static void r8152b_exit_oob(struct r8152 *tp)
{
u32 ocp_data;
@@ -1730,9 +2020,9 @@ static void r8152b_exit_oob(struct r8152 *tp)
ocp_data &= ~RCR_ACPT_ALL;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data |= RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, true);
+ r8153_teredo_off(tp);
+ r8152b_hw_phy_cfg(tp);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CR, 0x00);
@@ -1838,10 +2128,6 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
- ocp_data |= MAGIC_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CPCR);
ocp_data |= CPCR_RX_VLAN;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
@@ -1854,36 +2140,26 @@ static void r8152b_enter_oob(struct r8152 *tp)
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data &= ~RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, false);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data |= RCR_APM | RCR_AM | RCR_AB;
ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
}
-static void r8152b_disable_aldps(struct r8152 *tp)
-{
- ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPDNPS | LINKENA | DIS_SDSAVE);
- msleep(20);
-}
-
-static inline void r8152b_enable_aldps(struct r8152 *tp)
-{
- ocp_reg_write(tp, OCP_ALDPS_CONFIG, ENPWRSAVE | ENPDNPS |
- LINKENA | DIS_SDSAVE);
-}
-
static void r8153_hw_phy_cfg(struct r8152 *tp)
{
u32 ocp_data;
u16 data;
ocp_reg_write(tp, OCP_ADC_CFG, CKADSEL_L | ADC_EN | EN_EMI_L);
- r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
+ data = r8152_mdio_read(tp, MII_BMCR);
+ if (data & BMCR_PDOWN) {
+ data &= ~BMCR_PDOWN;
+ r8152_mdio_write(tp, MII_BMCR, data);
+ }
+
+ r8153_clear_bp(tp);
if (tp->version == RTL_VER_03) {
data = ocp_reg_read(tp, OCP_EEE_CFG);
@@ -1919,9 +2195,11 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
data = sram_read(tp, SRAM_10M_AMP2);
data |= AMP_DN;
sram_write(tp, SRAM_10M_AMP2, data);
+
+ set_bit(PHY_RESET, &tp->flags);
}
-static void r8153_u1u2en(struct r8152 *tp, int enable)
+static void r8153_u1u2en(struct r8152 *tp, bool enable)
{
u8 u1u2[8];
@@ -1933,7 +2211,7 @@ static void r8153_u1u2en(struct r8152 *tp, int enable)
usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
}
-static void r8153_u2p3en(struct r8152 *tp, int enable)
+static void r8153_u2p3en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
@@ -1945,7 +2223,7 @@ static void r8153_u2p3en(struct r8152 *tp, int enable)
ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
}
-static void r8153_power_cut_en(struct r8152 *tp, int enable)
+static void r8153_power_cut_en(struct r8152 *tp, bool enable)
{
u32 ocp_data;
@@ -1961,28 +2239,12 @@ static void r8153_power_cut_en(struct r8152 *tp, int enable)
ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
}
-static void r8153_teredo_off(struct r8152 *tp)
-{
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
- ocp_data &= ~(TEREDO_SEL | TEREDO_RS_EVENT_MASK | OOB_TEREDO_EN);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
-
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_WDT6_CTRL, WDT6_SET_MODE);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_REALWOW_TIMER, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_TEREDO_TIMER, 0);
-}
-
static void r8153_first_init(struct r8152 *tp)
{
u32 ocp_data;
int i;
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data |= RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
-
+ rxdy_gated_en(tp, true);
r8153_teredo_off(tp);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
@@ -2075,10 +2337,6 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL);
- ocp_data |= MAGIC_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_CFG_WOL, ocp_data);
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
ocp_data &= ~TEREDO_WAKE_MASK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG, ocp_data);
@@ -2095,11 +2353,7 @@ static void r8153_enter_oob(struct r8152 *tp)
ocp_data |= NOW_IS_OOB | DIS_MCU_CLROOB;
ocp_write_byte(tp, MCU_TYPE_PLA, PLA_OOB_CTRL, ocp_data);
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG5, LAN_WAKE_EN);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MISC_1);
- ocp_data &= ~RXDY_GATED_EN;
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_MISC_1, ocp_data);
+ rxdy_gated_en(tp, false);
ocp_data = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
ocp_data |= RCR_APM | RCR_AM | RCR_AB;
@@ -2190,12 +2444,26 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
}
+ if (test_bit(PHY_RESET, &tp->flags))
+ bmcr |= BMCR_RESET;
+
if (tp->mii.supports_gmii)
r8152_mdio_write(tp, MII_CTRL1000, gbcr);
r8152_mdio_write(tp, MII_ADVERTISE, anar);
r8152_mdio_write(tp, MII_BMCR, bmcr);
+ if (test_bit(PHY_RESET, &tp->flags)) {
+ int i;
+
+ clear_bit(PHY_RESET, &tp->flags);
+ for (i = 0; i < 50; i++) {
+ msleep(20);
+ if ((r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET) == 0)
+ break;
+ }
+ }
+
out:
return ret;
@@ -2203,12 +2471,7 @@ out:
static void rtl8152_down(struct r8152 *tp)
{
- u32 ocp_data;
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data &= ~POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
-
+ r8152_power_cut_en(tp, false);
r8152b_disable_aldps(tp);
r8152b_enter_oob(tp);
r8152b_enable_aldps(tp);
@@ -2216,8 +2479,8 @@ static void rtl8152_down(struct r8152 *tp)
static void rtl8153_down(struct r8152 *tp)
{
- r8153_u1u2en(tp, 0);
- r8153_power_cut_en(tp, 0);
+ r8153_u1u2en(tp, false);
+ r8153_power_cut_en(tp, false);
r8153_disable_aldps(tp);
r8153_enter_oob(tp);
r8153_enable_aldps(tp);
@@ -2252,6 +2515,9 @@ static void rtl_work_func_t(struct work_struct *work)
{
struct r8152 *tp = container_of(work, struct r8152, schedule.work);
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
if (!test_bit(WORK_ENABLE, &tp->flags))
goto out1;
@@ -2264,8 +2530,14 @@ static void rtl_work_func_t(struct work_struct *work)
if (test_bit(RTL8152_SET_RX_MODE, &tp->flags))
_rtl8152_set_rx_mode(tp->netdev);
+ if (tp->speed & LINK_STATUS)
+ tx_bottom(tp);
+
+ if (test_bit(PHY_RESET, &tp->flags))
+ rtl_phy_reset(tp);
+
out1:
- return;
+ usb_autopm_put_interface(tp->intf);
}
static int rtl8152_open(struct net_device *netdev)
@@ -2273,6 +2545,27 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ res = alloc_all_mem(tp);
+ if (res)
+ goto out;
+
+ res = usb_autopm_get_interface(tp->intf);
+ if (res < 0) {
+ free_all_mem(tp);
+ goto out;
+ }
+
+ /* The WORK_ENABLE flag may still be set when autoresume occurs */
+ if (test_bit(WORK_ENABLE, &tp->flags)) {
+ clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
+ cancel_delayed_work_sync(&tp->schedule);
+ if (tp->speed & LINK_STATUS)
+ tp->rtl_ops.disable(tp);
+ }
+
+ tp->rtl_ops.up(tp);
+
rtl8152_set_speed(tp, AUTONEG_ENABLE,
tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
DUPLEX_FULL);
@@ -2286,9 +2579,12 @@ static int rtl8152_open(struct net_device *netdev)
netif_device_detach(tp->netdev);
netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
res);
+ free_all_mem(tp);
}
+ usb_autopm_put_interface(tp->intf);
+out:
return res;
}
@@ -2301,33 +2597,30 @@ static int rtl8152_close(struct net_device *netdev)
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
- tasklet_disable(&tp->tl);
- tp->rtl_ops.disable(tp);
- tasklet_enable(&tp->tl);
- return res;
-}
+ res = usb_autopm_get_interface(tp->intf);
+ if (res < 0) {
+ rtl_drop_queued_tx(tp);
+ } else {
+ /*
+ * Autosuspend may have been enabled and would not be
+ * disabled when autoresume occurs, because
+ * netif_running() would be false.
+ */
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ rtl_runtime_suspend_enable(tp, false);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ }
-static void rtl_clear_bp(struct r8152 *tp)
-{
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_0, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_2, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_4, 0);
- ocp_write_dword(tp, MCU_TYPE_PLA, PLA_BP_6, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_0, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_2, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_4, 0);
- ocp_write_dword(tp, MCU_TYPE_USB, USB_BP_6, 0);
- mdelay(3);
- ocp_write_word(tp, MCU_TYPE_PLA, PLA_BP_BA, 0);
- ocp_write_word(tp, MCU_TYPE_USB, USB_BP_BA, 0);
-}
+ tasklet_disable(&tp->tl);
+ tp->rtl_ops.down(tp);
+ tasklet_enable(&tp->tl);
+ usb_autopm_put_interface(tp->intf);
+ }
-static void r8153_clear_bp(struct r8152 *tp)
-{
- ocp_write_byte(tp, MCU_TYPE_PLA, PLA_BP_EN, 0);
- ocp_write_byte(tp, MCU_TYPE_USB, USB_BP_EN, 0);
- rtl_clear_bp(tp);
+ free_all_mem(tp);
+
+ return res;
}
static void r8152b_enable_eee(struct r8152 *tp)
@@ -2378,18 +2671,9 @@ static void r8152b_enable_fc(struct r8152 *tp)
r8152_mdio_write(tp, MII_ADVERTISE, anar);
}
-static void r8152b_hw_phy_cfg(struct r8152 *tp)
-{
- r8152_mdio_write(tp, MII_BMCR, BMCR_ANENABLE);
- r8152b_disable_aldps(tp);
-}
-
static void r8152b_init(struct r8152 *tp)
{
u32 ocp_data;
- int i;
-
- rtl_clear_bp(tp);
if (tp->version == RTL_VER_01) {
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
@@ -2397,17 +2681,7 @@ static void r8152b_init(struct r8152 *tp)
ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
}
- r8152b_hw_phy_cfg(tp);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data &= ~POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RESUME_INDICATE;
- ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
-
- r8152b_exit_oob(tp);
+ r8152_power_cut_en(tp, false);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
ocp_data |= TX_10M_IDLE_EN | PFM_PWM_SWITCH;
@@ -2424,14 +2698,6 @@ static void r8152b_init(struct r8152 *tp)
r8152b_enable_aldps(tp);
r8152b_enable_fc(tp);
- r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
- BMCR_ANRESTART);
- for (i = 0; i < 100; i++) {
- udelay(100);
- if (!(r8152_mdio_read(tp, MII_BMCR) & BMCR_RESET))
- break;
- }
-
/* enable rx aggregation */
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL);
ocp_data &= ~RX_AGG_DISABLE;
@@ -2443,7 +2709,7 @@ static void r8153_init(struct r8152 *tp)
u32 ocp_data;
int i;
- r8153_u1u2en(tp, 0);
+ r8153_u1u2en(tp, false);
for (i = 0; i < 500; i++) {
if (ocp_read_word(tp, MCU_TYPE_PLA, PLA_BOOT_CTRL) &
@@ -2459,14 +2725,12 @@ static void r8153_init(struct r8152 *tp)
msleep(20);
}
- r8153_u2p3en(tp, 0);
+ r8153_u2p3en(tp, false);
ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL);
ocp_data &= ~TIMER11_EN;
ocp_write_word(tp, MCU_TYPE_USB, USB_WDT11_CTRL, ocp_data);
- r8153_clear_bp(tp);
-
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE);
ocp_data &= ~LED_MODE_MASK;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_LED_FEATURE, ocp_data);
@@ -2484,10 +2748,8 @@ static void r8153_init(struct r8152 *tp)
ocp_data |= SEN_VAL_NORMAL | SEL_RXIDLE;
ocp_write_word(tp, MCU_TYPE_USB, USB_AFE_CTRL2, ocp_data);
- r8153_power_cut_en(tp, 0);
- r8153_u1u2en(tp, 1);
-
- r8153_first_init(tp);
+ r8153_power_cut_en(tp, false);
+ r8153_u1u2en(tp, true);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL, ALDPS_SPDWN_RATIO);
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, EEE_SPDWN_RATIO);
@@ -2502,26 +2764,30 @@ static void r8153_init(struct r8152 *tp)
r8153_enable_eee(tp);
r8153_enable_aldps(tp);
r8152b_enable_fc(tp);
-
- r8152_mdio_write(tp, MII_BMCR, BMCR_RESET | BMCR_ANENABLE |
- BMCR_ANRESTART);
}
static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
{
struct r8152 *tp = usb_get_intfdata(intf);
- netif_device_detach(tp->netdev);
+ if (PMSG_IS_AUTO(message))
+ set_bit(SELECTIVE_SUSPEND, &tp->flags);
+ else
+ netif_device_detach(tp->netdev);
if (netif_running(tp->netdev)) {
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
- tasklet_disable(&tp->tl);
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ rtl_runtime_suspend_enable(tp, true);
+ } else {
+ tasklet_disable(&tp->tl);
+ tp->rtl_ops.down(tp);
+ tasklet_enable(&tp->tl);
+ }
}
- tp->rtl_ops.down(tp);
-
return 0;
}
@@ -2529,22 +2795,77 @@ static int rtl8152_resume(struct usb_interface *intf)
{
struct r8152 *tp = usb_get_intfdata(intf);
- tp->rtl_ops.init(tp);
- netif_device_attach(tp->netdev);
+ if (!test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ tp->rtl_ops.init(tp);
+ netif_device_attach(tp->netdev);
+ }
+
if (netif_running(tp->netdev)) {
- rtl8152_set_speed(tp, AUTONEG_ENABLE,
+ if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
+ rtl_runtime_suspend_enable(tp, false);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ if (tp->speed & LINK_STATUS)
+ tp->rtl_ops.disable(tp);
+ } else {
+ tp->rtl_ops.up(tp);
+ rtl8152_set_speed(tp, AUTONEG_ENABLE,
tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
DUPLEX_FULL);
+ }
tp->speed = 0;
netif_carrier_off(tp->netdev);
set_bit(WORK_ENABLE, &tp->flags);
usb_submit_urb(tp->intr_urb, GFP_KERNEL);
- tasklet_enable(&tp->tl);
}
return 0;
}
+static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct r8152 *tp = netdev_priv(dev);
+
+ if (usb_autopm_get_interface(tp->intf) < 0)
+ return;
+
+ wol->supported = WAKE_ANY;
+ wol->wolopts = __rtl_get_wol(tp);
+
+ usb_autopm_put_interface(tp->intf);
+}
+
+static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+ struct r8152 *tp = netdev_priv(dev);
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out_set_wol;
+
+ __rtl_set_wol(tp, wol->wolopts);
+ tp->saved_wolopts = wol->wolopts & WAKE_ANY;
+
+ usb_autopm_put_interface(tp->intf);
+
+out_set_wol:
+ return ret;
+}
+
+static u32 rtl8152_get_msglevel(struct net_device *dev)
+{
+ struct r8152 *tp = netdev_priv(dev);
+
+ return tp->msg_enable;
+}
+
+static void rtl8152_set_msglevel(struct net_device *dev, u32 value)
+{
+ struct r8152 *tp = netdev_priv(dev);
+
+ tp->msg_enable = value;
+}
+
static void rtl8152_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
{
@@ -2569,8 +2890,18 @@ int rtl8152_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd)
static int rtl8152_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct r8152 *tp = netdev_priv(dev);
+ int ret;
+
+ ret = usb_autopm_get_interface(tp->intf);
+ if (ret < 0)
+ goto out;
+
+ ret = rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
- return rtl8152_set_speed(tp, cmd->autoneg, cmd->speed, cmd->duplex);
+ usb_autopm_put_interface(tp->intf);
+
+out:
+ return ret;
}
static struct ethtool_ops ops = {
@@ -2578,13 +2909,21 @@ static struct ethtool_ops ops = {
.get_settings = rtl8152_get_settings,
.set_settings = rtl8152_set_settings,
.get_link = ethtool_op_get_link,
+ .get_msglevel = rtl8152_get_msglevel,
+ .set_msglevel = rtl8152_set_msglevel,
+ .get_wol = rtl8152_get_wol,
+ .set_wol = rtl8152_set_wol,
};
static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
{
struct r8152 *tp = netdev_priv(netdev);
struct mii_ioctl_data *data = if_mii(rq);
- int res = 0;
+ int res;
+
+ res = usb_autopm_get_interface(tp->intf);
+ if (res < 0)
+ goto out;
switch (cmd) {
case SIOCGMIIPHY:
@@ -2607,6 +2946,9 @@ static int rtl8152_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
res = -EOPNOTSUPP;
}
+ usb_autopm_put_interface(tp->intf);
+
+out:
return res;
}
@@ -2659,22 +3001,13 @@ static void r8152b_get_version(struct r8152 *tp)
static void rtl8152_unload(struct r8152 *tp)
{
- u32 ocp_data;
-
- if (tp->version != RTL_VER_01) {
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_UPS_CTRL);
- ocp_data |= POWER_CUT;
- ocp_write_word(tp, MCU_TYPE_USB, USB_UPS_CTRL, ocp_data);
- }
-
- ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS);
- ocp_data &= ~RESUME_INDICATE;
- ocp_write_word(tp, MCU_TYPE_USB, USB_PM_CTRL_STATUS, ocp_data);
+ if (tp->version != RTL_VER_01)
+ r8152_power_cut_en(tp, true);
}
static void rtl8153_unload(struct r8152 *tp)
{
- r8153_power_cut_en(tp, 1);
+ r8153_power_cut_en(tp, true);
}
static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
@@ -2689,6 +3022,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
ops->init = r8152b_init;
ops->enable = rtl8152_enable;
ops->disable = rtl8152_disable;
+ ops->up = r8152b_exit_oob;
ops->down = rtl8152_down;
ops->unload = rtl8152_unload;
ret = 0;
@@ -2697,6 +3031,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
ops->init = r8153_init;
ops->enable = rtl8153_enable;
ops->disable = rtl8152_disable;
+ ops->up = r8153_first_init;
ops->down = rtl8153_down;
ops->unload = rtl8153_unload;
ret = 0;
@@ -2712,6 +3047,7 @@ static int rtl_ops_init(struct r8152 *tp, const struct usb_device_id *id)
ops->init = r8153_init;
ops->enable = rtl8153_enable;
ops->disable = rtl8152_disable;
+ ops->up = r8153_first_init;
ops->down = rtl8153_down;
ops->unload = rtl8153_unload;
ret = 0;
@@ -2775,14 +3111,12 @@ static int rtl8152_probe(struct usb_interface *intf,
tp->mii.phy_id = R8152_PHY_ID;
tp->mii.supports_gmii = 0;
+ intf->needs_remote_wakeup = 1;
+
r8152b_get_version(tp);
tp->rtl_ops.init(tp);
set_ethernet_addr(tp);
- ret = alloc_all_mem(tp);
- if (ret)
- goto out;
-
usb_set_intfdata(intf, tp);
ret = register_netdev(netdev);
@@ -2791,6 +3125,12 @@ static int rtl8152_probe(struct usb_interface *intf,
goto out1;
}
+ tp->saved_wolopts = __rtl_get_wol(tp);
+ if (tp->saved_wolopts)
+ device_set_wakeup_enable(&udev->dev, true);
+ else
+ device_set_wakeup_enable(&udev->dev, false);
+
netif_info(tp, probe, netdev, "%s\n", DRIVER_VERSION);
return 0;
@@ -2812,7 +3152,6 @@ static void rtl8152_disconnect(struct usb_interface *intf)
tasklet_kill(&tp->tl);
unregister_netdev(tp->netdev);
tp->rtl_ops.unload(tp);
- free_all_mem(tp);
free_netdev(tp->netdev);
}
}
@@ -2835,6 +3174,8 @@ static struct usb_driver rtl8152_driver = {
.suspend = rtl8152_suspend,
.resume = rtl8152_resume,
.reset_resume = rtl8152_resume,
+ .supports_autosuspend = 1,
+ .disable_hub_initiated_lpm = 1,
};
module_usb_driver(rtl8152_driver);
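
The rtl8152_get_wol()/rtl8152_set_wol() hooks added above are reached through the standard ethtool Wake-on-LAN interface, so no driver-specific tooling is required. A minimal user-space sketch enabling magic-packet wake via the SIOCETHTOOL ioctl (equivalent to "ethtool -s eth0 wol g"; the interface name is an assumption and root privileges are needed):

/* Example only: request WAKE_MAGIC on "eth0" through the generic
 * ethtool ioctl that rtl8152_set_wol() ends up servicing.
 */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_wolinfo wol = {
		.cmd = ETHTOOL_SWOL,
		.wolopts = WAKE_MAGIC,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&wol;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}
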
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 5b37437..3aca92e 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -14,6 +14,7 @@
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
+#include <net/rtnetlink.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/veth.h>
@@ -235,18 +236,9 @@ static int veth_change_mtu(struct net_device *dev, int new_mtu)
static int veth_dev_init(struct net_device *dev)
{
- int i;
-
- dev->vstats = alloc_percpu(struct pcpu_vstats);
+ dev->vstats = netdev_alloc_pcpu_stats(struct pcpu_vstats);
if (!dev->vstats)
return -ENOMEM;
-
- for_each_possible_cpu(i) {
- struct pcpu_vstats *veth_stats;
- veth_stats = per_cpu_ptr(dev->vstats, i);
- u64_stats_init(&veth_stats->syncp);
- }
-
return 0;
}
@@ -333,10 +325,9 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
nla_peer = data[VETH_INFO_PEER];
ifmp = nla_data(nla_peer);
- err = nla_parse(peer_tb, IFLA_MAX,
- nla_data(nla_peer) + sizeof(struct ifinfomsg),
- nla_len(nla_peer) - sizeof(struct ifinfomsg),
- ifla_policy);
+ err = rtnl_nla_parse_ifla(peer_tb,
+ nla_data(nla_peer) + sizeof(struct ifinfomsg),
+ nla_len(nla_peer) - sizeof(struct ifinfomsg));
if (err < 0)
return err;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3be786f..9275c8c 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2729,47 +2729,35 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
/*
* Enable MSIx vectors.
* Returns :
- * 0 on successful enabling of required vectors,
* VMXNET3_LINUX_MIN_MSIX_VECT when only minimum number of vectors required
- * could be enabled.
- * number of vectors which can be enabled otherwise (this number is smaller
+ * were enabled.
+ * number of vectors which were enabled otherwise (this number is greater
* than VMXNET3_LINUX_MIN_MSIX_VECT)
*/
static int
-vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
- int vectors)
-{
- int err = 0, vector_threshold;
- vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
-
- while (vectors >= vector_threshold) {
- err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
- vectors);
- if (!err) {
- adapter->intr.num_intrs = vectors;
- return 0;
- } else if (err < 0) {
- dev_err(&adapter->netdev->dev,
- "Failed to enable MSI-X, error: %d\n", err);
- vectors = 0;
- } else if (err < vector_threshold) {
- break;
- } else {
- /* If fails to enable required number of MSI-x vectors
- * try enabling minimum number of vectors required.
- */
- dev_err(&adapter->netdev->dev,
- "Failed to enable %d MSI-X, trying %d instead\n",
- vectors, vector_threshold);
- vectors = vector_threshold;
- }
+vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter, int nvec)
+{
+ int ret = pci_enable_msix_range(adapter->pdev,
+ adapter->intr.msix_entries, nvec, nvec);
+
+ if (ret == -ENOSPC && nvec > VMXNET3_LINUX_MIN_MSIX_VECT) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to enable %d MSI-X, trying %d\n",
+ nvec, VMXNET3_LINUX_MIN_MSIX_VECT);
+
+ ret = pci_enable_msix_range(adapter->pdev,
+ adapter->intr.msix_entries,
+ VMXNET3_LINUX_MIN_MSIX_VECT,
+ VMXNET3_LINUX_MIN_MSIX_VECT);
}
- dev_info(&adapter->pdev->dev,
- "Number of MSI-X interrupts which can be allocated "
- "is lower than min threshold required.\n");
- return err;
+ if (ret < 0) {
+ dev_err(&adapter->netdev->dev,
+ "Failed to enable MSI-X, error: %d\n", ret);
+ }
+
+ return ret;
}
@@ -2796,56 +2784,50 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
#ifdef CONFIG_PCI_MSI
if (adapter->intr.type == VMXNET3_IT_MSIX) {
- int vector, err = 0;
-
- adapter->intr.num_intrs = (adapter->share_intr ==
- VMXNET3_INTR_TXSHARE) ? 1 :
- adapter->num_tx_queues;
- adapter->intr.num_intrs += (adapter->share_intr ==
- VMXNET3_INTR_BUDDYSHARE) ? 0 :
- adapter->num_rx_queues;
- adapter->intr.num_intrs += 1; /* for link event */
-
- adapter->intr.num_intrs = (adapter->intr.num_intrs >
- VMXNET3_LINUX_MIN_MSIX_VECT
- ? adapter->intr.num_intrs :
- VMXNET3_LINUX_MIN_MSIX_VECT);
-
- for (vector = 0; vector < adapter->intr.num_intrs; vector++)
- adapter->intr.msix_entries[vector].entry = vector;
-
- err = vmxnet3_acquire_msix_vectors(adapter,
- adapter->intr.num_intrs);
+ int i, nvec;
+
+ nvec = adapter->share_intr == VMXNET3_INTR_TXSHARE ?
+ 1 : adapter->num_tx_queues;
+ nvec += adapter->share_intr == VMXNET3_INTR_BUDDYSHARE ?
+ 0 : adapter->num_rx_queues;
+ nvec += 1; /* for link event */
+ nvec = nvec > VMXNET3_LINUX_MIN_MSIX_VECT ?
+ nvec : VMXNET3_LINUX_MIN_MSIX_VECT;
+
+ for (i = 0; i < nvec; i++)
+ adapter->intr.msix_entries[i].entry = i;
+
+ nvec = vmxnet3_acquire_msix_vectors(adapter, nvec);
+ if (nvec < 0)
+ goto msix_err;
+
/* If we cannot allocate one MSIx vector per queue
* then limit the number of rx queues to 1
*/
- if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
+ if (nvec == VMXNET3_LINUX_MIN_MSIX_VECT) {
if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
|| adapter->num_rx_queues != 1) {
adapter->share_intr = VMXNET3_INTR_TXSHARE;
netdev_err(adapter->netdev,
"Number of rx queues : 1\n");
adapter->num_rx_queues = 1;
- adapter->intr.num_intrs =
- VMXNET3_LINUX_MIN_MSIX_VECT;
}
- return;
}
- if (!err)
- return;
+ adapter->intr.num_intrs = nvec;
+ return;
+
+msix_err:
/* If we cannot allocate MSIx vectors use only one rx queue */
dev_info(&adapter->pdev->dev,
"Failed to enable MSI-X, error %d. "
- "Limiting #rx queues to 1, try MSI.\n", err);
+ "Limiting #rx queues to 1, try MSI.\n", nvec);
adapter->intr.type = VMXNET3_IT_MSI;
}
if (adapter->intr.type == VMXNET3_IT_MSI) {
- int err;
- err = pci_enable_msi(adapter->pdev);
- if (!err) {
+ if (!pci_enable_msi(adapter->pdev)) {
adapter->num_rx_queues = 1;
adapter->intr.num_intrs = 1;
return;
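The rewritten helper leans on pci_enable_msix_range(), which allocates between minvec and maxvec vectors and returns the number granted, or a negative errno on failure; passing the same value twice makes it an exact-or-fail request. A minimal caller-side sketch of that semantic, with generic parameter names (the thresholds and entries array are placeholders, not vmxnet3 internals):

#include <linux/pci.h>

/* Exact-or-fail MSI-X request: ask for exactly nvec vectors, retry once
 * with a smaller fixed count on -ENOSPC, give up on any other error.
 * Returns the number of vectors granted (> 0) or a negative errno. */
static int example_request_msix(struct pci_dev *pdev,
				struct msix_entry *entries,
				int nvec, int min_nvec)
{
	int ret = pci_enable_msix_range(pdev, entries, nvec, nvec);

	if (ret == -ENOSPC && nvec > min_nvec)
		ret = pci_enable_msix_range(pdev, entries,
					    min_nvec, min_nvec);
	return ret;
}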
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index b0f705c..eb59b14 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1132,7 +1132,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
struct vxlan_sock *vs;
struct vxlanhdr *vxh;
- __be16 port;
/* Need Vxlan and inner Ethernet header to be present */
if (!pskb_may_pull(skb, VXLAN_HLEN))
@@ -1150,8 +1149,6 @@ static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
if (iptunnel_pull_header(skb, VXLAN_HLEN, htons(ETH_P_TEB)))
goto drop;
- port = inet_sk(sk)->inet_sport;
-
vs = rcu_dereference_sk_user_data(sk);
if (!vs)
goto drop;
@@ -1978,19 +1975,11 @@ static int vxlan_init(struct net_device *dev)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
struct vxlan_sock *vs;
- int i;
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *vxlan_stats;
- vxlan_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&vxlan_stats->syncp);
- }
-
-
spin_lock(&vn->sock_lock);
vs = vxlan_find_sock(dev_net(dev), vxlan->dst_port);
if (vs) {
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index b59cfbe..6260b83 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -161,6 +161,8 @@ struct ath_common {
bool btcoex_enabled;
bool disable_ani;
bool bt_ant_diversity;
+
+ int last_rssi;
};
struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
diff --git a/drivers/net/wireless/ath/ath10k/txrx.c b/drivers/net/wireless/ath/ath10k/txrx.c
index 74f45fa6..27f20e0 100644
--- a/drivers/net/wireless/ath/ath10k/txrx.c
+++ b/drivers/net/wireless/ath/ath10k/txrx.c
@@ -204,7 +204,7 @@ static void process_rx_rates(struct ath10k *ar, struct htt_rx_info *info,
break;
/* 80MHZ */
case 2:
- status->flag |= RX_FLAG_80MHZ;
+ status->vht_flag |= RX_VHT_FLAG_80MHZ;
}
status->flag |= RX_FLAG_VHT;
@@ -266,7 +266,7 @@ void ath10k_process_rx(struct ath10k *ar, struct htt_rx_info *info)
status->flag & RX_FLAG_HT ? "ht" : "",
status->flag & RX_FLAG_VHT ? "vht" : "",
status->flag & RX_FLAG_40MHZ ? "40" : "",
- status->flag & RX_FLAG_80MHZ ? "80" : "",
+ status->vht_flag & RX_VHT_FLAG_80MHZ ? "80" : "",
status->flag & RX_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->vht_nss,
diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c
index fd4c89d..c2c6f46 100644
--- a/drivers/net/wireless/ath/ath6kl/cfg80211.c
+++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c
@@ -790,7 +790,7 @@ void ath6kl_cfg80211_connect_event(struct ath6kl_vif *vif, u16 channel,
if (nw_type & ADHOC_NETWORK) {
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "ad-hoc %s selected\n",
nw_type & ADHOC_CREATOR ? "creator" : "joiner");
- cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(vif->ndev, bssid, chan, GFP_KERNEL);
cfg80211_put_bss(ar->wiphy, bss);
return;
}
@@ -861,13 +861,9 @@ void ath6kl_cfg80211_disconnect_event(struct ath6kl_vif *vif, u8 reason,
}
if (vif->nw_type & ADHOC_NETWORK) {
- if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC) {
+ if (vif->wdev.iftype != NL80211_IFTYPE_ADHOC)
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG,
"%s: ath6k not in ibss mode\n", __func__);
- return;
- }
- memset(bssid, 0, ETH_ALEN);
- cfg80211_ibss_joined(vif->ndev, bssid, GFP_KERNEL);
return;
}
@@ -3256,6 +3252,15 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
struct ath6kl_vif *vif = netdev_priv(dev);
u16 interval;
int ret, rssi_thold;
+ int n_match_sets = request->n_match_sets;
+
+ /*
+ * If there's a matchset w/o an SSID, then assume it's just for
+ * the RSSI (nothing else is currently supported) and ignore it.
+ * The device only supports a global RSSI filter that we set below.
+ */
+ if (n_match_sets == 1 && !request->match_sets[0].ssid.ssid_len)
+ n_match_sets = 0;
if (ar->state != ATH6KL_STATE_ON)
return -EIO;
@@ -3268,11 +3273,11 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
ret = ath6kl_set_probed_ssids(ar, vif, request->ssids,
request->n_ssids,
request->match_sets,
- request->n_match_sets);
+ n_match_sets);
if (ret < 0)
return ret;
- if (!request->n_match_sets) {
+ if (!n_match_sets) {
ret = ath6kl_wmi_bssfilter_cmd(ar->wmi, vif->fw_vif_idx,
ALL_BSS_FILTER, 0);
if (ret < 0)
@@ -3286,12 +3291,12 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
if (test_bit(ATH6KL_FW_CAPABILITY_RSSI_SCAN_THOLD,
ar->fw_capabilities)) {
- if (request->rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
+ if (request->min_rssi_thold <= NL80211_SCAN_RSSI_THOLD_OFF)
rssi_thold = 0;
- else if (request->rssi_thold < -127)
+ else if (request->min_rssi_thold < -127)
rssi_thold = -127;
else
- rssi_thold = request->rssi_thold;
+ rssi_thold = request->min_rssi_thold;
ret = ath6kl_wmi_set_rssi_filter_cmd(ar->wmi, vif->fw_vif_idx,
rssi_thold);
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index 7b96b3e..8fcc029 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -120,18 +120,6 @@ config ATH9K_WOW
This option enables Wake on Wireless LAN support for certain cards.
Currently, AR9462 is supported.
-config ATH9K_LEGACY_RATE_CONTROL
- bool "Atheros ath9k rate control"
- depends on ATH9K
- default n
- ---help---
- Say Y, if you want to use the ath9k specific rate control
- module instead of minstrel_ht. Be warned that there are various
- issues with the ath9k RC and minstrel is a more robust algorithm.
- Note that even if this option is selected, "ath9k_rate_control"
- has to be passed to mac80211 using the module parameter,
- ieee80211_default_rc_algo.
-
config ATH9K_RFKILL
bool "Atheros ath9k rfkill support" if EXPERT
depends on ATH9K
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index a40e5c5..747975e 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -8,7 +8,6 @@ ath9k-y += beacon.o \
antenna.o
ath9k-$(CONFIG_ATH9K_BTCOEX_SUPPORT) += mci.o
-ath9k-$(CONFIG_ATH9K_LEGACY_RATE_CONTROL) += rc.o
ath9k-$(CONFIG_ATH9K_PCI) += pci.o
ath9k-$(CONFIG_ATH9K_AHB) += ahb.o
ath9k-$(CONFIG_ATH9K_DFS_DEBUGFS) += dfs_debug.o
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index a352128..ac8301e 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -23,10 +23,11 @@
#define MAX_MEASUREMENT MAX_IQCAL_MEASUREMENT
#define MAX_MAG_DELTA 11
#define MAX_PHS_DELTA 10
+#define MAXIQCAL 3
struct coeff {
- int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
- int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT];
+ int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
+ int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
int iqc_coeff[2];
};
@@ -655,9 +656,6 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (i2_m_q2_a0_d1 > 0x800)
i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1);
- if (i2_p_q2_a0_d1 > 0x1000)
- i2_p_q2_a0_d1 = -((0x1fff - i2_p_q2_a0_d1) + 1);
-
if (iq_corr_a0_d1 > 0x800)
iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1);
@@ -800,7 +798,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (q_q_coff > 63)
q_q_coff = 63;
- iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
+ iqc_coeff[0] = (q_q_coff * 128) + (0x7f & q_i_coff);
ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[0]);
@@ -831,7 +829,7 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
if (q_q_coff > 63)
q_q_coff = 63;
- iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
+ iqc_coeff[1] = (q_q_coff * 128) + (0x7f & q_i_coff);
ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n",
chain_idx, iqc_coeff[1]);
@@ -839,7 +837,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
return true;
}
-static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
+static void ar9003_hw_detect_outlier(int mp_coeff[][MAXIQCAL],
+ int nmeasurement,
int max_delta)
{
int mp_max = -64, max_idx = 0;
@@ -848,20 +847,20 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
/* find min/max mismatch across all calibrated gains */
for (i = 0; i < nmeasurement; i++) {
- if (mp_coeff[i] > mp_max) {
- mp_max = mp_coeff[i];
+ if (mp_coeff[i][0] > mp_max) {
+ mp_max = mp_coeff[i][0];
max_idx = i;
- } else if (mp_coeff[i] < mp_min) {
- mp_min = mp_coeff[i];
+ } else if (mp_coeff[i][0] < mp_min) {
+ mp_min = mp_coeff[i][0];
min_idx = i;
}
}
/* find average (exclude max abs value) */
for (i = 0; i < nmeasurement; i++) {
- if ((abs(mp_coeff[i]) < abs(mp_max)) ||
- (abs(mp_coeff[i]) < abs(mp_min))) {
- mp_avg += mp_coeff[i];
+ if ((abs(mp_coeff[i][0]) < abs(mp_max)) ||
+ (abs(mp_coeff[i][0]) < abs(mp_min))) {
+ mp_avg += mp_coeff[i][0];
mp_count++;
}
}
@@ -873,7 +872,7 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
if (mp_count)
mp_avg /= mp_count;
else
- mp_avg = mp_coeff[nmeasurement - 1];
+ mp_avg = mp_coeff[nmeasurement - 1][0];
/* detect outlier */
if (abs(mp_max - mp_min) > max_delta) {
@@ -882,15 +881,16 @@ static void ar9003_hw_detect_outlier(int *mp_coeff, int nmeasurement,
else
outlier_idx = min_idx;
- mp_coeff[outlier_idx] = mp_avg;
+ mp_coeff[outlier_idx][0] = mp_avg;
}
}
-static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
- struct coeff *coeff,
- bool is_reusable)
+static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah,
+ struct coeff *coeff,
+ bool is_reusable)
{
int i, im, nmeasurement;
+ int magnitude, phase;
u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
struct ath9k_hw_cal_data *caldata = ah->caldata;
@@ -920,21 +920,30 @@ static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
if (nmeasurement > MAX_MEASUREMENT)
nmeasurement = MAX_MEASUREMENT;
- /* detect outlier only if nmeasurement > 1 */
- if (nmeasurement > 1) {
- /* Detect magnitude outlier */
- ar9003_hw_detect_outlier(coeff->mag_coeff[i],
- nmeasurement, MAX_MAG_DELTA);
-
- /* Detect phase outlier */
- ar9003_hw_detect_outlier(coeff->phs_coeff[i],
- nmeasurement, MAX_PHS_DELTA);
+ /*
+ * Skip normal outlier detection for AR9550.
+ */
+ if (!AR_SREV_9550(ah)) {
+ /* detect outlier only if nmeasurement > 1 */
+ if (nmeasurement > 1) {
+ /* Detect magnitude outlier */
+ ar9003_hw_detect_outlier(coeff->mag_coeff[i],
+ nmeasurement,
+ MAX_MAG_DELTA);
+
+ /* Detect phase outlier */
+ ar9003_hw_detect_outlier(coeff->phs_coeff[i],
+ nmeasurement,
+ MAX_PHS_DELTA);
+ }
}
for (im = 0; im < nmeasurement; im++) {
+ magnitude = coeff->mag_coeff[i][im][0];
+ phase = coeff->phs_coeff[i][im][0];
- coeff->iqc_coeff[0] = (coeff->mag_coeff[i][im] & 0x7f) |
- ((coeff->phs_coeff[i][im] & 0x7f) << 7);
+ coeff->iqc_coeff[0] =
+ (phase & 0x7f) | ((magnitude & 0x7f) << 7);
if ((im % 2) == 0)
REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
@@ -991,7 +1000,63 @@ static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
return true;
}
-static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
+static void __ar955x_tx_iq_cal_sort(struct ath_hw *ah,
+ struct coeff *coeff,
+ int i, int nmeasurement)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ int im, ix, iy, temp;
+
+ for (im = 0; im < nmeasurement; im++) {
+ for (ix = 0; ix < MAXIQCAL - 1; ix++) {
+ for (iy = ix + 1; iy <= MAXIQCAL - 1; iy++) {
+ if (coeff->mag_coeff[i][im][iy] <
+ coeff->mag_coeff[i][im][ix]) {
+ temp = coeff->mag_coeff[i][im][ix];
+ coeff->mag_coeff[i][im][ix] =
+ coeff->mag_coeff[i][im][iy];
+ coeff->mag_coeff[i][im][iy] = temp;
+ }
+ if (coeff->phs_coeff[i][im][iy] <
+ coeff->phs_coeff[i][im][ix]) {
+ temp = coeff->phs_coeff[i][im][ix];
+ coeff->phs_coeff[i][im][ix] =
+ coeff->phs_coeff[i][im][iy];
+ coeff->phs_coeff[i][im][iy] = temp;
+ }
+ }
+ }
+ coeff->mag_coeff[i][im][0] = coeff->mag_coeff[i][im][MAXIQCAL / 2];
+ coeff->phs_coeff[i][im][0] = coeff->phs_coeff[i][im][MAXIQCAL / 2];
+
+ ath_dbg(common, CALIBRATE,
+ "IQCAL: Median [ch%d][gain%d]: mag = %d phase = %d\n",
+ i, im,
+ coeff->mag_coeff[i][im][0],
+ coeff->phs_coeff[i][im][0]);
+ }
+}
+
+static bool ar955x_tx_iq_cal_median(struct ath_hw *ah,
+ struct coeff *coeff,
+ int iqcal_idx,
+ int nmeasurement)
+{
+ int i;
+
+ if ((iqcal_idx + 1) != MAXIQCAL)
+ return false;
+
+ for (i = 0; i < AR9300_MAX_CHAINS; i++)
+ __ar955x_tx_iq_cal_sort(ah, coeff, i, nmeasurement);
+
+ return true;
+}
+
+static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
+ int iqcal_idx,
+ bool is_reusable)
{
struct ath_common *common = ath9k_hw_common(ah);
const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
@@ -1004,10 +1069,11 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
AR_PHY_CHAN_INFO_TAB_1,
AR_PHY_CHAN_INFO_TAB_2,
};
- struct coeff coeff;
+ static struct coeff coeff;
s32 iq_res[6];
int i, im, j;
- int nmeasurement;
+ int nmeasurement = 0;
+ bool outlier_detect = true;
for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->txchainmask & (1 << i)))
@@ -1065,17 +1131,23 @@ static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah, bool is_reusable)
goto tx_iqcal_fail;
}
- coeff.mag_coeff[i][im] = coeff.iqc_coeff[0] & 0x7f;
- coeff.phs_coeff[i][im] =
+ coeff.phs_coeff[i][im][iqcal_idx] =
+ coeff.iqc_coeff[0] & 0x7f;
+ coeff.mag_coeff[i][im][iqcal_idx] =
(coeff.iqc_coeff[0] >> 7) & 0x7f;
- if (coeff.mag_coeff[i][im] > 63)
- coeff.mag_coeff[i][im] -= 128;
- if (coeff.phs_coeff[i][im] > 63)
- coeff.phs_coeff[i][im] -= 128;
+ if (coeff.mag_coeff[i][im][iqcal_idx] > 63)
+ coeff.mag_coeff[i][im][iqcal_idx] -= 128;
+ if (coeff.phs_coeff[i][im][iqcal_idx] > 63)
+ coeff.phs_coeff[i][im][iqcal_idx] -= 128;
}
}
- ar9003_hw_tx_iqcal_load_avg_2_passes(ah, &coeff, is_reusable);
+
+ if (AR_SREV_9550(ah))
+ outlier_detect = ar955x_tx_iq_cal_median(ah, &coeff,
+ iqcal_idx, nmeasurement);
+ if (outlier_detect)
+ ar9003_hw_tx_iq_cal_outlier_detection(ah, &coeff, is_reusable);
return;
@@ -1409,7 +1481,7 @@ skip_tx_iqcal:
}
if (txiqcal_done)
- ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
+ ar9003_hw_tx_iq_cal_post_proc(ah, 0, is_reusable);
else if (caldata && test_bit(TXIQCAL_DONE, &caldata->cal_flags))
ar9003_hw_tx_iq_cal_reload(ah);
@@ -1455,14 +1527,38 @@ skip_tx_iqcal:
return true;
}
+static bool do_ar9003_agc_cal(struct ath_hw *ah)
+{
+ struct ath_common *common = ath9k_hw_common(ah);
+ bool status;
+
+ REG_WRITE(ah, AR_PHY_AGC_CONTROL,
+ REG_READ(ah, AR_PHY_AGC_CONTROL) |
+ AR_PHY_AGC_CONTROL_CAL);
+
+ status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
+ AR_PHY_AGC_CONTROL_CAL,
+ 0, AH_WAIT_TIMEOUT);
+ if (!status) {
+ ath_dbg(common, CALIBRATE,
+ "offset calibration failed to complete in %d ms,"
+ "noisy environment?\n",
+ AH_WAIT_TIMEOUT / 1000);
+ return false;
+ }
+
+ return true;
+}
+
static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ath9k_hw_cal_data *caldata = ah->caldata;
bool txiqcal_done = false;
- bool is_reusable = true, status = true;
+ bool status = true;
bool run_agc_cal = false, sep_iq_cal = false;
+ int i = 0;
/* Use chip chainmask only for calibration */
ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask);
@@ -1485,7 +1581,12 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
* AGC calibration. Specifically, AR9550 in SoC chips.
*/
if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
- txiqcal_done = true;
+ if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0,
+ AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) {
+ txiqcal_done = true;
+ } else {
+ txiqcal_done = false;
+ }
run_agc_cal = true;
} else {
sep_iq_cal = true;
@@ -1512,27 +1613,37 @@ skip_tx_iqcal:
if (AR_SREV_9330_11(ah))
ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan));
- /* Calibrate the AGC */
- REG_WRITE(ah, AR_PHY_AGC_CONTROL,
- REG_READ(ah, AR_PHY_AGC_CONTROL) |
- AR_PHY_AGC_CONTROL_CAL);
-
- /* Poll for offset calibration complete */
- status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
- AR_PHY_AGC_CONTROL_CAL,
- 0, AH_WAIT_TIMEOUT);
- }
+ /*
+ * For non-AR9550 chips, we just trigger AGC calibration
+ * in the HW, poll for completion and then process
+ * the results.
+ *
+ * For AR955x, we run it multiple times and use
+ * median IQ correction.
+ */
+ if (!AR_SREV_9550(ah)) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
- if (!status) {
- ath_dbg(common, CALIBRATE,
- "offset calibration failed to complete in %d ms; noisy environment?\n",
- AH_WAIT_TIMEOUT / 1000);
- return false;
+ if (txiqcal_done)
+ ar9003_hw_tx_iq_cal_post_proc(ah, 0, false);
+ } else {
+ if (!txiqcal_done) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
+ } else {
+ for (i = 0; i < MAXIQCAL; i++) {
+ status = do_ar9003_agc_cal(ah);
+ if (!status)
+ return false;
+ ar9003_hw_tx_iq_cal_post_proc(ah, i, false);
+ }
+ }
+ }
}
- if (txiqcal_done)
- ar9003_hw_tx_iq_cal_post_proc(ah, is_reusable);
-
/* Revert chainmask to runtime parameters */
ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
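For AR955x the TX IQ calibration is now run MAXIQCAL times, each pass is sorted per gain entry, and the middle sample is promoted to index 0 for the usual load path. A minimal standalone sketch of that median-of-a-small-sample idea with generic names; the driver's __ar955x_tx_iq_cal_sort() above does the same thing in place on the mag/phs coefficient arrays:

/* Median of a small, odd-sized sample set (MAXIQCAL == 3 here): sort in
 * place with an insertion sort and return the middle element. */
static int example_median(int *samples, int n)
{
	int i, j;

	for (i = 1; i < n; i++) {
		int key = samples[i];

		for (j = i - 1; j >= 0 && samples[j] > key; j--)
			samples[j + 1] = samples[j];
		samples[j + 1] = key;
	}
	return samples[n / 2];
}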
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index b5ac32c..21d13bc 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -30,7 +30,6 @@
#include "spectral.h"
struct ath_node;
-struct ath_rate_table;
extern struct ieee80211_ops ath9k_ops;
extern int ath9k_modparam_nohwcrypt;
@@ -150,6 +149,11 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
#define IS_CCK_RATE(rate) ((rate >= 0x18) && (rate <= 0x1e))
#define IS_OFDM_RATE(rate) ((rate >= 0x8) && (rate <= 0xf))
+enum {
+ WLAN_RC_PHY_OFDM,
+ WLAN_RC_PHY_CCK,
+};
+
struct ath_txq {
int mac80211_qnum; /* mac80211 queue number, -1 means not mac80211 Q */
u32 axq_qnum; /* ath9k hardware queue number */
@@ -442,7 +446,8 @@ void ath9k_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif,
void ath9k_beacon_assign_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_beacon_remove_slot(struct ath_softc *sc, struct ieee80211_vif *vif);
void ath9k_set_beacon(struct ath_softc *sc);
-bool ath9k_csa_is_finished(struct ath_softc *sc);
+bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif);
+void ath9k_csa_update(struct ath_softc *sc);
/*******************/
/* Link Monitoring */
@@ -757,7 +762,6 @@ struct ath_softc {
#endif
struct ath9k_hw_cal_data caldata;
- int last_rssi;
#ifdef CONFIG_ATH9K_DEBUGFS
struct ath9k_debug debug;
@@ -774,7 +778,6 @@ struct ath_softc {
#endif
struct ath_descdma txsdma;
- struct ieee80211_vif *csa_vif;
struct ath_ant_comb ant_comb;
u8 ant_tx, ant_rx;
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 2e8bba0..32d00e8 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -292,11 +292,8 @@ static void ath9k_set_tsfadjust(struct ath_softc *sc, struct ieee80211_vif *vif)
(unsigned long long)tsfadjust, avp->av_bslot);
}
-bool ath9k_csa_is_finished(struct ath_softc *sc)
+bool ath9k_csa_is_finished(struct ath_softc *sc, struct ieee80211_vif *vif)
{
- struct ieee80211_vif *vif;
-
- vif = sc->csa_vif;
if (!vif || !vif->csa_active)
return false;
@@ -304,11 +301,23 @@ bool ath9k_csa_is_finished(struct ath_softc *sc)
return false;
ieee80211_csa_finish(vif);
-
- sc->csa_vif = NULL;
return true;
}
+static void ath9k_csa_update_vif(void *data, u8 *mac, struct ieee80211_vif *vif)
+{
+ struct ath_softc *sc = data;
+ ath9k_csa_is_finished(sc, vif);
+}
+
+void ath9k_csa_update(struct ath_softc *sc)
+{
+ ieee80211_iterate_active_interfaces(sc->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ ath9k_csa_update_vif,
+ sc);
+}
+
void ath9k_beacon_tasklet(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
@@ -362,13 +371,13 @@ void ath9k_beacon_tasklet(unsigned long data)
return;
}
- /* EDMA devices check that in the tx completion function. */
- if (!edma && ath9k_csa_is_finished(sc))
- return;
-
slot = ath9k_beacon_choose_slot(sc);
vif = sc->beacon.bslot[slot];
+ /* EDMA devices check that in the tx completion function. */
+ if (!edma && ath9k_csa_is_finished(sc, vif))
+ return;
+
if (!vif || !vif->bss_conf.enable_beacon)
return;
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index 768c733..c6dd7f1 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -27,6 +27,250 @@ MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Shared library for Atheros wireless 802.11n LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
+/* Assumes you've already done the endian to CPU conversion */
+bool ath9k_cmn_rx_accept(struct ath_common *common,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rxs,
+ struct ath_rx_status *rx_stats,
+ bool *decrypt_error,
+ unsigned int rxfilter)
+{
+ struct ath_hw *ah = common->ah;
+ bool is_mc, is_valid_tkip, strip_mic, mic_error;
+ __le16 fc;
+
+ fc = hdr->frame_control;
+
+ is_mc = !!is_multicast_ether_addr(hdr->addr1);
+ is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
+ test_bit(rx_stats->rs_keyix, common->tkip_keymap);
+ strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
+ ieee80211_has_protected(fc) &&
+ !(rx_stats->rs_status &
+ (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_KEYMISS));
+
+ /*
+ * Key miss events are only relevant for pairwise keys where the
+ * descriptor does contain a valid key index. This has been observed
+ * mostly with CCMP encryption.
+ */
+ if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
+ !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
+ rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
+
+ mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
+ !ieee80211_has_morefrags(fc) &&
+ !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
+ (rx_stats->rs_status & ATH9K_RXERR_MIC);
+
+ /*
+ * The rx_stats->rs_status will not be set until the end of the
+ * chained descriptors so it can be ignored if rs_more is set. The
+ * rs_more will be false at the last element of the chained
+ * descriptors.
+ */
+ if (rx_stats->rs_status != 0) {
+ u8 status_mask;
+
+ if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
+ rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
+ mic_error = false;
+ }
+
+ if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
+ (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
+ *decrypt_error = true;
+ mic_error = false;
+ }
+
+
+ /*
+ * Reject error frames with the exception of
+ * decryption and MIC failures. For monitor mode,
+ * we also ignore the CRC error.
+ */
+ status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
+ ATH9K_RXERR_KEYMISS;
+
+ if (ah->is_monitoring && (rxfilter & FIF_FCSFAIL))
+ status_mask |= ATH9K_RXERR_CRC;
+
+ if (rx_stats->rs_status & ~status_mask)
+ return false;
+ }
+
+ /*
+ * For unicast frames the MIC error bit can have false positives,
+ * so all MIC error reports need to be validated in software.
+ * False negatives are not common, so skip software verification
+ * if the hardware considers the MIC valid.
+ */
+ if (strip_mic)
+ rxs->flag |= RX_FLAG_MMIC_STRIPPED;
+ else if (is_mc && mic_error)
+ rxs->flag |= RX_FLAG_MMIC_ERROR;
+
+ return true;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_accept);
+
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ bool decrypt_error)
+{
+ struct ath_hw *ah = common->ah;
+ struct ieee80211_hdr *hdr;
+ int hdrlen, padpos, padsize;
+ u8 keyix;
+ __le16 fc;
+
+ /* see if any padding is done by the hw and remove it */
+ hdr = (struct ieee80211_hdr *) skb->data;
+ hdrlen = ieee80211_get_hdrlen_from_skb(skb);
+ fc = hdr->frame_control;
+ padpos = ieee80211_hdrlen(fc);
+
+ /* The MAC header is padded to have 32-bit boundary if the
+ * packet payload is non-zero. The general calculation for
+ * padsize would take into account odd header lengths:
+ * padsize = (4 - padpos % 4) % 4; However, since only
+ * even-length headers are used, padding can only be 0 or 2
+ * bytes and we can optimize this a bit. In addition, we must
+ * not try to remove padding from short control frames that do
+ * not have payload. */
+ padsize = padpos & 3;
+ if (padsize && skb->len >= padpos + padsize + FCS_LEN) {
+ memmove(skb->data + padsize, skb->data, padpos);
+ skb_pull(skb, padsize);
+ }
+
+ keyix = rx_stats->rs_keyix;
+
+ if (keyix != ATH9K_RXKEYIX_INVALID && !decrypt_error &&
+ ieee80211_has_protected(fc)) {
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ } else if (ieee80211_has_protected(fc)
+ && !decrypt_error && skb->len >= hdrlen + 4) {
+ keyix = skb->data[hdrlen + 3] >> 6;
+
+ if (test_bit(keyix, common->keymap))
+ rxs->flag |= RX_FLAG_DECRYPTED;
+ }
+ if (ah->sw_mgmt_crypto &&
+ (rxs->flag & RX_FLAG_DECRYPTED) &&
+ ieee80211_is_mgmt(fc))
+ /* Use software decrypt for management frames. */
+ rxs->flag &= ~RX_FLAG_DECRYPTED;
+}
+EXPORT_SYMBOL(ath9k_cmn_rx_skb_postprocess);
+
+int ath9k_cmn_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ieee80211_supported_band *sband;
+ enum ieee80211_band band;
+ unsigned int i = 0;
+ struct ath_hw *ah = common->ah;
+
+ band = ah->curchan->chan->band;
+ sband = hw->wiphy->bands[band];
+
+ if (IS_CHAN_QUARTER_RATE(ah->curchan))
+ rxs->flag |= RX_FLAG_5MHZ;
+ else if (IS_CHAN_HALF_RATE(ah->curchan))
+ rxs->flag |= RX_FLAG_10MHZ;
+
+ if (rx_stats->rs_rate & 0x80) {
+ /* HT rate */
+ rxs->flag |= RX_FLAG_HT;
+ rxs->flag |= rx_stats->flag;
+ rxs->rate_idx = rx_stats->rs_rate & 0x7f;
+ return 0;
+ }
+
+ for (i = 0; i < sband->n_bitrates; i++) {
+ if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
+ rxs->rate_idx = i;
+ return 0;
+ }
+ if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
+ rxs->flag |= RX_FLAG_SHORTPRE;
+ rxs->rate_idx = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(ath9k_cmn_process_rate);
+
+void ath9k_cmn_process_rssi(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs)
+{
+ struct ath_hw *ah = common->ah;
+ int last_rssi;
+ int rssi = rx_stats->rs_rssi;
+ int i, j;
+
+ /*
+ * RSSI is not available for subframes in an A-MPDU.
+ */
+ if (rx_stats->rs_moreaggr) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ return;
+ }
+
+ /*
+ * Check if the RSSI for the last subframe in an A-MPDU
+ * or an unaggregated frame is valid.
+ */
+ if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
+ rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
+ return;
+ }
+
+ for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
+ s8 rssi;
+
+ if (!(ah->rxchainmask & BIT(i)))
+ continue;
+
+ rssi = rx_stats->rs_rssi_ctl[i];
+ if (rssi != ATH9K_RSSI_BAD) {
+ rxs->chains |= BIT(j);
+ rxs->chain_signal[j] = ah->noise + rssi;
+ }
+ j++;
+ }
+
+ /*
+ * Update Beacon RSSI, this is used by ANI.
+ */
+ if (rx_stats->is_mybeacon &&
+ ((ah->opmode == NL80211_IFTYPE_STATION) ||
+ (ah->opmode == NL80211_IFTYPE_ADHOC))) {
+ ATH_RSSI_LPF(common->last_rssi, rx_stats->rs_rssi);
+ last_rssi = common->last_rssi;
+
+ if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
+ rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+ if (rssi < 0)
+ rssi = 0;
+
+ ah->stats.avgbrssi = rssi;
+ }
+
+ rxs->signal = ah->noise + rx_stats->rs_rssi;
+}
+EXPORT_SYMBOL(ath9k_cmn_process_rssi);
+
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb)
{
struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
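ath9k_cmn_rx_skb_postprocess() above strips the 32-bit alignment padding the hardware leaves between the 802.11 header and the payload. A short worked example of the padsize = padpos & 3 rule, assuming the usual header lengths (24 bytes for a plain data frame, 26 bytes for a QoS data frame):

/* Worked example of the padding rule used above:
 *   plain data frame: padpos = 24, 24 & 3 = 0 -> nothing to strip
 *   QoS data frame:   padpos = 26, 26 & 3 = 2 -> two pad bytes removed
 * For even header lengths this matches the general form
 * (4 - padpos % 4) % 4 mentioned in the comment. */
static unsigned int example_rx_padsize(unsigned int padpos)
{
	return padpos & 3;
}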
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index eb85e1b..38b5609 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -42,6 +42,25 @@
#define ATH_EP_RND(x, mul) \
(((x) + ((mul)/2)) / (mul))
+bool ath9k_cmn_rx_accept(struct ath_common *common,
+ struct ieee80211_hdr *hdr,
+ struct ieee80211_rx_status *rxs,
+ struct ath_rx_status *rx_stats,
+ bool *decrypt_error,
+ unsigned int rxfilter);
+void ath9k_cmn_rx_skb_postprocess(struct ath_common *common,
+ struct sk_buff *skb,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs,
+ bool decrypt_error);
+int ath9k_cmn_process_rate(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs);
+void ath9k_cmn_process_rssi(struct ath_common *common,
+ struct ieee80211_hw *hw,
+ struct ath_rx_status *rx_stats,
+ struct ieee80211_rx_status *rxs);
int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
struct ath9k_channel *ath9k_cmn_get_channel(struct ieee80211_hw *hw,
struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index cc7a025..559a68c 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -18,7 +18,6 @@
#define DEBUG_H
#include "hw.h"
-#include "rc.h"
#include "dfs_debug.h"
struct ath_txq;
diff --git a/drivers/net/wireless/ath/ath9k/dfs_debug.h b/drivers/net/wireless/ath/ath9k/dfs_debug.h
index 0a7ddf4..7936c91 100644
--- a/drivers/net/wireless/ath/ath9k/dfs_debug.h
+++ b/drivers/net/wireless/ath/ath9k/dfs_debug.h
@@ -21,6 +21,8 @@
#include "hw.h"
+struct ath_softc;
+
/**
* struct ath_dfs_stats - DFS Statistics per wiphy
* @pulses_total: pulses reported by HW
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 6d5d716..8e7153b 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -54,6 +54,8 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
.driver_info = AR9280_USB }, /* SMC Networks */
{ USB_DEVICE(0x0411, 0x017f),
.driver_info = AR9280_USB }, /* Sony UWA-BR100 */
+ { USB_DEVICE(0x0411, 0x0197),
+ .driver_info = AR9280_USB }, /* Buffalo WLI-UV-AG300P */
{ USB_DEVICE(0x04da, 0x3904),
.driver_info = AR9280_USB },
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 99a2031..ba83f58 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -277,7 +277,6 @@ struct ath9k_htc_rxbuf {
};
struct ath9k_htc_rx {
- int last_rssi; /* FIXME: per-STA */
struct list_head rxbuf;
spinlock_t rxbuflock;
};
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index c57d6b8..8d0b9bc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -611,6 +611,7 @@ static void ath9k_init_misc(struct ath9k_htc_priv *priv)
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
priv->ah->opmode = NL80211_IFTYPE_STATION;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index c9254a6..90dad41 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1474,6 +1474,7 @@ static void ath9k_htc_bss_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
if ((vif->type == NL80211_IFTYPE_STATION) && bss_conf->assoc) {
common->curaid = bss_conf->aid;
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
}
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 12e0f32..47b2bfc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -927,43 +927,39 @@ void ath9k_host_rx_init(struct ath9k_htc_priv *priv)
ath9k_hw_rxena(priv->ah);
ath9k_htc_opmode_init(priv);
ath9k_hw_startpcureceive(priv->ah, test_bit(OP_SCANNING, &priv->op_flags));
- priv->rx.last_rssi = ATH_RSSI_DUMMY_MARKER;
}
-static void ath9k_process_rate(struct ieee80211_hw *hw,
- struct ieee80211_rx_status *rxs,
- u8 rx_rate, u8 rs_flags)
+static inline void convert_htc_flag(struct ath_rx_status *rx_stats,
+ struct ath_htc_rx_status *rxstatus)
{
- struct ieee80211_supported_band *sband;
- enum ieee80211_band band;
- unsigned int i = 0;
-
- if (rx_rate & 0x80) {
- /* HT rate */
- rxs->flag |= RX_FLAG_HT;
- if (rs_flags & ATH9K_RX_2040)
- rxs->flag |= RX_FLAG_40MHZ;
- if (rs_flags & ATH9K_RX_GI)
- rxs->flag |= RX_FLAG_SHORT_GI;
- rxs->rate_idx = rx_rate & 0x7f;
- return;
- }
-
- band = hw->conf.chandef.chan->band;
- sband = hw->wiphy->bands[band];
-
- for (i = 0; i < sband->n_bitrates; i++) {
- if (sband->bitrates[i].hw_value == rx_rate) {
- rxs->rate_idx = i;
- return;
- }
- if (sband->bitrates[i].hw_value_short == rx_rate) {
- rxs->rate_idx = i;
- rxs->flag |= RX_FLAG_SHORTPRE;
- return;
- }
- }
+ rx_stats->flag = 0;
+ if (rxstatus->rs_flags & ATH9K_RX_2040)
+ rx_stats->flag |= RX_FLAG_40MHZ;
+ if (rxstatus->rs_flags & ATH9K_RX_GI)
+ rx_stats->flag |= RX_FLAG_SHORT_GI;
+}
+static void rx_status_htc_to_ath(struct ath_rx_status *rx_stats,
+ struct ath_htc_rx_status *rxstatus)
+{
+ rx_stats->rs_datalen = rxstatus->rs_datalen;
+ rx_stats->rs_status = rxstatus->rs_status;
+ rx_stats->rs_phyerr = rxstatus->rs_phyerr;
+ rx_stats->rs_rssi = rxstatus->rs_rssi;
+ rx_stats->rs_keyix = rxstatus->rs_keyix;
+ rx_stats->rs_rate = rxstatus->rs_rate;
+ rx_stats->rs_antenna = rxstatus->rs_antenna;
+ rx_stats->rs_more = rxstatus->rs_more;
+
+ memcpy(rx_stats->rs_rssi_ctl, rxstatus->rs_rssi_ctl,
+ sizeof(rx_stats->rs_rssi_ctl));
+ memcpy(rx_stats->rs_rssi_ext, rxstatus->rs_rssi_ext,
+ sizeof(rx_stats->rs_rssi_ext));
+
+ rx_stats->rs_isaggr = rxstatus->rs_isaggr;
+ rx_stats->rs_moreaggr = rxstatus->rs_moreaggr;
+ rx_stats->rs_num_delims = rxstatus->rs_num_delims;
+ convert_htc_flag(rx_stats, rxstatus);
}
static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
@@ -975,10 +971,10 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
struct ieee80211_hw *hw = priv->hw;
struct sk_buff *skb = rxbuf->skb;
struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath_hw *ah = common->ah;
struct ath_htc_rx_status *rxstatus;
- int hdrlen, padsize;
- int last_rssi = ATH_RSSI_DUMMY_MARKER;
- __le16 fc;
+ struct ath_rx_status rx_stats;
+ bool decrypt_error;
if (skb->len < HTC_RX_FRAME_HEADER_SIZE) {
ath_err(common, "Corrupted RX frame, dropping (len: %d)\n",
@@ -999,103 +995,39 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
ath9k_htc_err_stat_rx(priv, rxstatus);
/* Get the RX status information */
- memcpy(&rxbuf->rxstatus, rxstatus, HTC_RX_FRAME_HEADER_SIZE);
- skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
-
- hdr = (struct ieee80211_hdr *)skb->data;
- fc = hdr->frame_control;
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
-
- padsize = hdrlen & 3;
- if (padsize && skb->len >= hdrlen+padsize+FCS_LEN) {
- memmove(skb->data + padsize, skb->data, hdrlen);
- skb_pull(skb, padsize);
- }
memset(rx_status, 0, sizeof(struct ieee80211_rx_status));
- if (rxbuf->rxstatus.rs_status != 0) {
- if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_CRC)
- rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
- if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_PHY)
- goto rx_next;
-
- if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT) {
- /* FIXME */
- } else if (rxbuf->rxstatus.rs_status & ATH9K_RXERR_MIC) {
- if (ieee80211_is_ctl(fc))
- /*
- * Sometimes, we get invalid
- * MIC failures on valid control frames.
- * Remove these mic errors.
- */
- rxbuf->rxstatus.rs_status &= ~ATH9K_RXERR_MIC;
- else
- rx_status->flag |= RX_FLAG_MMIC_ERROR;
- }
-
- /*
- * Reject error frames with the exception of
- * decryption and MIC failures. For monitor mode,
- * we also ignore the CRC error.
- */
- if (priv->ah->opmode == NL80211_IFTYPE_MONITOR) {
- if (rxbuf->rxstatus.rs_status &
- ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
- ATH9K_RXERR_CRC))
- goto rx_next;
- } else {
- if (rxbuf->rxstatus.rs_status &
- ~(ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC)) {
- goto rx_next;
- }
- }
- }
-
- if (!(rxbuf->rxstatus.rs_status & ATH9K_RXERR_DECRYPT)) {
- u8 keyix;
- keyix = rxbuf->rxstatus.rs_keyix;
- if (keyix != ATH9K_RXKEYIX_INVALID) {
- rx_status->flag |= RX_FLAG_DECRYPTED;
- } else if (ieee80211_has_protected(fc) &&
- skb->len >= hdrlen + 4) {
- keyix = skb->data[hdrlen + 3] >> 6;
- if (test_bit(keyix, common->keymap))
- rx_status->flag |= RX_FLAG_DECRYPTED;
- }
- }
-
- ath9k_process_rate(hw, rx_status, rxbuf->rxstatus.rs_rate,
- rxbuf->rxstatus.rs_flags);
-
- if (rxbuf->rxstatus.rs_rssi != ATH9K_RSSI_BAD &&
- !rxbuf->rxstatus.rs_moreaggr)
- ATH_RSSI_LPF(priv->rx.last_rssi,
- rxbuf->rxstatus.rs_rssi);
-
- last_rssi = priv->rx.last_rssi;
+ /* Copy everything from ath_htc_rx_status (HTC_RX_FRAME_HEADER).
+ * After this, we can drop this part of skb. */
+ rx_status_htc_to_ath(&rx_stats, rxstatus);
+ rx_status->mactime = be64_to_cpu(rxstatus->rs_tstamp);
+ skb_pull(skb, HTC_RX_FRAME_HEADER_SIZE);
- if (ath_is_mybeacon(common, hdr)) {
- s8 rssi = rxbuf->rxstatus.rs_rssi;
+ /*
+ * Everything but the rate is checked here; the rate check is done
+ * separately to avoid doing two rate lookups per frame.
+ */
+ hdr = (struct ieee80211_hdr *)skb->data;
+ if (!ath9k_cmn_rx_accept(common, hdr, rx_status, &rx_stats,
+ &decrypt_error, priv->rxfilter))
+ goto rx_next;
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
+ ath9k_cmn_rx_skb_postprocess(common, skb, &rx_stats,
+ rx_status, decrypt_error);
- if (rssi < 0)
- rssi = 0;
+ if (ath9k_cmn_process_rate(common, hw, &rx_stats, rx_status))
+ goto rx_next;
- priv->ah->stats.avgbrssi = rssi;
- }
+ rx_stats.is_mybeacon = ath_is_mybeacon(common, hdr);
+ ath9k_cmn_process_rssi(common, hw, &rx_stats, rx_status);
- rx_status->mactime = be64_to_cpu(rxbuf->rxstatus.rs_tstamp);
- rx_status->band = hw->conf.chandef.chan->band;
- rx_status->freq = hw->conf.chandef.chan->center_freq;
- rx_status->signal = rxbuf->rxstatus.rs_rssi + ATH_DEFAULT_NOISE_FLOOR;
- rx_status->antenna = rxbuf->rxstatus.rs_antenna;
+ rx_status->band = ah->curchan->chan->band;
+ rx_status->freq = ah->curchan->chan->center_freq;
+ rx_status->antenna = rx_stats.rs_antenna;
rx_status->flag |= RX_FLAG_MACTIME_END;
return true;
-
rx_next:
return false;
}
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index aac4a40..a0ff5b6 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -358,6 +358,36 @@ ret:
kfree_skb(skb);
}
+static void ath9k_htc_fw_panic_report(struct htc_target *htc_handle,
+ struct sk_buff *skb)
+{
+ uint32_t *pattern = (uint32_t *)skb->data;
+
+ switch (*pattern) {
+ case 0x33221199:
+ {
+ struct htc_panic_bad_vaddr *htc_panic;
+ htc_panic = (struct htc_panic_bad_vaddr *) skb->data;
+ dev_err(htc_handle->dev, "ath: firmware panic! "
+ "exccause: 0x%08x; pc: 0x%08x; badvaddr: 0x%08x.\n",
+ htc_panic->exccause, htc_panic->pc,
+ htc_panic->badvaddr);
+ break;
+ }
+ case 0x33221299:
+ {
+ struct htc_panic_bad_epid *htc_panic;
+ htc_panic = (struct htc_panic_bad_epid *) skb->data;
+ dev_err(htc_handle->dev, "ath: firmware panic! "
+ "bad epid: 0x%08x\n", htc_panic->epid);
+ break;
+ }
+ default:
+ dev_err(htc_handle->dev, "ath: uknown panic pattern!\n");
+ break;
+ }
+}
+
/*
* HTC Messages are handled directly here and the obtained SKB
* is freed.
@@ -379,6 +409,12 @@ void ath9k_htc_rx_msg(struct htc_target *htc_handle,
htc_hdr = (struct htc_frame_hdr *) skb->data;
epid = htc_hdr->endpoint_id;
+ if (epid == 0x99) {
+ ath9k_htc_fw_panic_report(htc_handle, skb);
+ kfree_skb(skb);
+ return;
+ }
+
if (epid >= ENDPOINT_MAX) {
if (pipe_id != USB_REG_IN_PIPE)
dev_kfree_skb_any(skb);
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
index e1ffbb6..06474cc 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.h
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -77,6 +77,18 @@ struct htc_config_pipe_msg {
u8 credits;
} __packed;
+struct htc_panic_bad_vaddr {
+ __be32 pattern;
+ __be32 exccause;
+ __be32 pc;
+ __be32 badvaddr;
+} __packed;
+
+struct htc_panic_bad_epid {
+ __be32 pattern;
+ __be32 epid;
+} __packed;
+
struct htc_ep_callbacks {
void *priv;
void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
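The panic records above are filled in by the firmware and carried as big-endian (__be32) fields; the report handler added in htc_hst.c switches on the leading magic pattern (0x33221199 / 0x33221299) to pick the layout. A hedged sketch of decoding such a record on the host, converting the wire fields with be32_to_cpu(); the helper name below is illustrative, not part of the patch:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Illustrative decode of a "bad vaddr" panic record: convert the
 * big-endian wire fields to host order before logging or comparing. */
static void example_decode_bad_vaddr(const struct htc_panic_bad_vaddr *p,
				     u32 *exccause, u32 *pc, u32 *badvaddr)
{
	*exccause = be32_to_cpu(p->exccause);
	*pc = be32_to_cpu(p->pc);
	*badvaddr = be32_to_cpu(p->badvaddr);
}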
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 303ce27..2509c2f 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -23,7 +23,6 @@
#include "hw.h"
#include "hw-ops.h"
-#include "rc.h"
#include "ar9003_mac.h"
#include "ar9003_mci.h"
#include "ar9003_phy.h"
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 1fc2e5a..07a0315 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -538,7 +538,7 @@ static void ath9k_init_misc(struct ath_softc *sc)
setup_timer(&common->ani.timer, ath_ani_calibrate, (unsigned long)sc);
- sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->config.txpowlimit = ATH_TXPOWER_MAX;
memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
sc->beacon.slottime = ATH9K_SLOT_TIME_9;
@@ -1106,19 +1106,11 @@ static int __init ath9k_init(void)
{
int error;
- /* Register rate control algorithm */
- error = ath_rate_control_register();
- if (error != 0) {
- pr_err("Unable to register rate control algorithm: %d\n",
- error);
- goto err_out;
- }
-
error = ath_pci_init();
if (error < 0) {
pr_err("No PCI devices found, driver not installed\n");
error = -ENODEV;
- goto err_rate_unregister;
+ goto err_out;
}
error = ath_ahb_init();
@@ -1131,9 +1123,6 @@ static int __init ath9k_init(void)
err_pci_exit:
ath_pci_exit();
-
- err_rate_unregister:
- ath_rate_control_unregister();
err_out:
return error;
}
@@ -1144,7 +1133,6 @@ static void __exit ath9k_exit(void)
is_ath9k_unloaded = true;
ath_ahb_exit();
ath_pci_exit();
- ath_rate_control_unregister();
pr_info("%s: Driver unloaded\n", dev_info);
}
module_exit(ath9k_exit);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 1027137..89df634 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -155,12 +155,8 @@ struct ath_htc_rx_status {
u8 rs_status;
u8 rs_phyerr;
int8_t rs_rssi;
- int8_t rs_rssi_ctl0;
- int8_t rs_rssi_ctl1;
- int8_t rs_rssi_ctl2;
- int8_t rs_rssi_ext0;
- int8_t rs_rssi_ext1;
- int8_t rs_rssi_ext2;
+ int8_t rs_rssi_ctl[3];
+ int8_t rs_rssi_ext[3];
u8 rs_keyix;
u8 rs_rate;
u8 rs_antenna;
@@ -170,6 +166,7 @@ struct ath_htc_rx_status {
u8 rs_num_delims;
u8 rs_flags;
u8 rs_dummy;
+ /* FIXME: evm* never used? */
__be32 evm0;
__be32 evm1;
__be32 evm2;
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 5924f72d..afce549 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -1178,9 +1178,6 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
if (ath9k_uses_beacons(vif->type))
ath9k_beacon_remove_slot(sc, vif);
- if (sc->csa_vif == vif)
- sc->csa_vif = NULL;
-
ath9k_ps_wakeup(sc);
ath9k_calculate_summary_state(hw, NULL);
ath9k_ps_restore(sc);
@@ -1609,7 +1606,7 @@ static void ath9k_set_assoc_state(struct ath_softc *sc,
common->curaid = bss_conf->aid;
ath9k_hw_write_associd(sc->sc_ah);
- sc->last_rssi = ATH_RSSI_DUMMY_MARKER;
+ common->last_rssi = ATH_RSSI_DUMMY_MARKER;
sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
spin_lock_irqsave(&sc->sc_pm_lock, flags);
@@ -1866,7 +1863,7 @@ static void ath9k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
static bool ath9k_has_tx_pending(struct ath_softc *sc)
{
- int i, npend;
+ int i, npend = 0;
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
if (!ATH_TXQ_SETUP(sc, i))
@@ -2086,13 +2083,8 @@ static void ath9k_channel_switch_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
- struct ath_softc *sc = hw->priv;
-
- /* mac80211 does not support CSA in multi-if cases (yet) */
- if (WARN_ON(sc->csa_vif))
- return;
-
- sc->csa_vif = vif;
+ /* depend on vif->csa_active only */
+ return;
}
struct ieee80211_ops ath9k_ops = {
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
deleted file mode 100644
index d829bb6..0000000
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ /dev/null
@@ -1,1495 +0,0 @@
-/*
- * Copyright (c) 2004 Video54 Technologies, Inc.
- * Copyright (c) 2004-2011 Atheros Communications, Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#include <linux/slab.h>
-#include <linux/export.h>
-
-#include "ath9k.h"
-
-static const struct ath_rate_table ar5416_11na_ratetable = {
- 68,
- 8, /* MCS start */
- {
- [0] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000,
- 5400, 0, 12 }, /* 6 Mb */
- [1] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000,
- 7800, 1, 18 }, /* 9 Mb */
- [2] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
- 10000, 2, 24 }, /* 12 Mb */
- [3] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
- 13900, 3, 36 }, /* 18 Mb */
- [4] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
- 17300, 4, 48 }, /* 24 Mb */
- [5] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
- 23000, 5, 72 }, /* 36 Mb */
- [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
- 27400, 6, 96 }, /* 48 Mb */
- [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
- 29300, 7, 108 }, /* 54 Mb */
- [8] = { RC_HT_SDT_2040, WLAN_RC_PHY_HT_20_SS, 6500,
- 6400, 0, 0 }, /* 6.5 Mb */
- [9] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
- 12700, 1, 1 }, /* 13 Mb */
- [10] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
- 18800, 2, 2 }, /* 19.5 Mb */
- [11] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
- 25000, 3, 3 }, /* 26 Mb */
- [12] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
- 36700, 4, 4 }, /* 39 Mb */
- [13] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
- 48100, 5, 5 }, /* 52 Mb */
- [14] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
- 53500, 6, 6 }, /* 58.5 Mb */
- [15] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
- 59000, 7, 7 }, /* 65 Mb */
- [16] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
- 65400, 7, 7 }, /* 75 Mb */
- [17] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
- 12700, 8, 8 }, /* 13 Mb */
- [18] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
- 24800, 9, 9 }, /* 26 Mb */
- [19] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
- 36600, 10, 10 }, /* 39 Mb */
- [20] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
- 48100, 11, 11 }, /* 52 Mb */
- [21] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
- 69500, 12, 12 }, /* 78 Mb */
- [22] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
- 89500, 13, 13 }, /* 104 Mb */
- [23] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
- 98900, 14, 14 }, /* 117 Mb */
- [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
- 108300, 15, 15 }, /* 130 Mb */
- [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
- 120000, 15, 15 }, /* 144.4 Mb */
- [26] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
- 17400, 16, 16 }, /* 19.5 Mb */
- [27] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
- 35100, 17, 17 }, /* 39 Mb */
- [28] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
- 52600, 18, 18 }, /* 58.5 Mb */
- [29] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
- 70400, 19, 19 }, /* 78 Mb */
- [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
- 104900, 20, 20 }, /* 117 Mb */
- [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
- 115800, 20, 20 }, /* 130 Mb*/
- [32] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
- 137200, 21, 21 }, /* 156 Mb */
- [33] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
- 151100, 21, 21 }, /* 173.3 Mb */
- [34] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
- 152800, 22, 22 }, /* 175.5 Mb */
- [35] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
- 168400, 22, 22 }, /* 195 Mb*/
- [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
- 168400, 23, 23 }, /* 195 Mb */
- [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
- 185000, 23, 23 }, /* 216.7 Mb */
- [38] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
- 13200, 0, 0 }, /* 13.5 Mb*/
- [39] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
- 25900, 1, 1 }, /* 27.0 Mb*/
- [40] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
- 38600, 2, 2 }, /* 40.5 Mb*/
- [41] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
- 49800, 3, 3 }, /* 54 Mb */
- [42] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
- 72200, 4, 4 }, /* 81 Mb */
- [43] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 108000,
- 92900, 5, 5 }, /* 108 Mb */
- [44] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
- 102700, 6, 6 }, /* 121.5 Mb*/
- [45] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
- 112000, 7, 7 }, /* 135 Mb */
- [46] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
- 122000, 7, 7 }, /* 150 Mb */
- [47] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
- 25800, 8, 8 }, /* 27 Mb */
- [48] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
- 49800, 9, 9 }, /* 54 Mb */
- [49] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
- 71900, 10, 10 }, /* 81 Mb */
- [50] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
- 92500, 11, 11 }, /* 108 Mb */
- [51] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
- 130300, 12, 12 }, /* 162 Mb */
- [52] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
- 162800, 13, 13 }, /* 216 Mb */
- [53] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
- 178200, 14, 14 }, /* 243 Mb */
- [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
- 192100, 15, 15 }, /* 270 Mb */
- [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
- 207000, 15, 15 }, /* 300 Mb */
- [56] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
- 36100, 16, 16 }, /* 40.5 Mb */
- [57] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
- 72900, 17, 17 }, /* 81 Mb */
- [58] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
- 108300, 18, 18 }, /* 121.5 Mb */
- [59] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
- 142000, 19, 19 }, /* 162 Mb */
- [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
- 205100, 20, 20 }, /* 243 Mb */
- [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
- 224700, 20, 20 }, /* 270 Mb */
- [62] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
- 263100, 21, 21 }, /* 324 Mb */
- [63] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
- 288000, 21, 21 }, /* 360 Mb */
- [64] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
- 290700, 22, 22 }, /* 364.5 Mb */
- [65] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
- 317200, 22, 22 }, /* 405 Mb */
- [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
- 317200, 23, 23 }, /* 405 Mb */
- [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
- 346400, 23, 23 }, /* 450 Mb */
- },
- 50, /* probe interval */
- WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
-};
-
-/* 4ms frame limit not used for NG mode. The values filled
- * for HT are the 64K max aggregate limit */
-
-static const struct ath_rate_table ar5416_11ng_ratetable = {
- 72,
- 12, /* MCS start */
- {
- [0] = { RC_ALL, WLAN_RC_PHY_CCK, 1000,
- 900, 0, 2 }, /* 1 Mb */
- [1] = { RC_ALL, WLAN_RC_PHY_CCK, 2000,
- 1900, 1, 4 }, /* 2 Mb */
- [2] = { RC_ALL, WLAN_RC_PHY_CCK, 5500,
- 4900, 2, 11 }, /* 5.5 Mb */
- [3] = { RC_ALL, WLAN_RC_PHY_CCK, 11000,
- 8100, 3, 22 }, /* 11 Mb */
- [4] = { RC_INVALID, WLAN_RC_PHY_OFDM, 6000,
- 5400, 4, 12 }, /* 6 Mb */
- [5] = { RC_INVALID, WLAN_RC_PHY_OFDM, 9000,
- 7800, 5, 18 }, /* 9 Mb */
- [6] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000,
- 10100, 6, 24 }, /* 12 Mb */
- [7] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000,
- 14100, 7, 36 }, /* 18 Mb */
- [8] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000,
- 17700, 8, 48 }, /* 24 Mb */
- [9] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000,
- 23700, 9, 72 }, /* 36 Mb */
- [10] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000,
- 27400, 10, 96 }, /* 48 Mb */
- [11] = { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000,
- 30900, 11, 108 }, /* 54 Mb */
- [12] = { RC_INVALID, WLAN_RC_PHY_HT_20_SS, 6500,
- 6400, 0, 0 }, /* 6.5 Mb */
- [13] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 13000,
- 12700, 1, 1 }, /* 13 Mb */
- [14] = { RC_HT_SDT_20, WLAN_RC_PHY_HT_20_SS, 19500,
- 18800, 2, 2 }, /* 19.5 Mb*/
- [15] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 26000,
- 25000, 3, 3 }, /* 26 Mb */
- [16] = { RC_HT_SD_20, WLAN_RC_PHY_HT_20_SS, 39000,
- 36700, 4, 4 }, /* 39 Mb */
- [17] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 52000,
- 48100, 5, 5 }, /* 52 Mb */
- [18] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 58500,
- 53500, 6, 6 }, /* 58.5 Mb */
- [19] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS, 65000,
- 59000, 7, 7 }, /* 65 Mb */
- [20] = { RC_HT_S_20, WLAN_RC_PHY_HT_20_SS_HGI, 72200,
- 65400, 7, 7 }, /* 65 Mb*/
- [21] = { RC_INVALID, WLAN_RC_PHY_HT_20_DS, 13000,
- 12700, 8, 8 }, /* 13 Mb */
- [22] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 26000,
- 24800, 9, 9 }, /* 26 Mb */
- [23] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_DS, 39000,
- 36600, 10, 10 }, /* 39 Mb */
- [24] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 52000,
- 48100, 11, 11 }, /* 52 Mb */
- [25] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 78000,
- 69500, 12, 12 }, /* 78 Mb */
- [26] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 104000,
- 89500, 13, 13 }, /* 104 Mb */
- [27] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 117000,
- 98900, 14, 14 }, /* 117 Mb */
- [28] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS, 130000,
- 108300, 15, 15 }, /* 130 Mb */
- [29] = { RC_HT_DT_20, WLAN_RC_PHY_HT_20_DS_HGI, 144400,
- 120000, 15, 15 }, /* 144.4 Mb */
- [30] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 19500,
- 17400, 16, 16 }, /* 19.5 Mb */
- [31] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 39000,
- 35100, 17, 17 }, /* 39 Mb */
- [32] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 58500,
- 52600, 18, 18 }, /* 58.5 Mb */
- [33] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 78000,
- 70400, 19, 19 }, /* 78 Mb */
- [34] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS, 117000,
- 104900, 20, 20 }, /* 117 Mb */
- [35] = { RC_INVALID, WLAN_RC_PHY_HT_20_TS_HGI, 130000,
- 115800, 20, 20 }, /* 130 Mb */
- [36] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 156000,
- 137200, 21, 21 }, /* 156 Mb */
- [37] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 173300,
- 151100, 21, 21 }, /* 173.3 Mb */
- [38] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 175500,
- 152800, 22, 22 }, /* 175.5 Mb */
- [39] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 195000,
- 168400, 22, 22 }, /* 195 Mb */
- [40] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS, 195000,
- 168400, 23, 23 }, /* 195 Mb */
- [41] = { RC_HT_T_20, WLAN_RC_PHY_HT_20_TS_HGI, 216700,
- 185000, 23, 23 }, /* 216.7 Mb */
- [42] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 13500,
- 13200, 0, 0 }, /* 13.5 Mb */
- [43] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 27500,
- 25900, 1, 1 }, /* 27.0 Mb */
- [44] = { RC_HT_SDT_40, WLAN_RC_PHY_HT_40_SS, 40500,
- 38600, 2, 2 }, /* 40.5 Mb */
- [45] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 54000,
- 49800, 3, 3 }, /* 54 Mb */
- [46] = { RC_HT_SD_40, WLAN_RC_PHY_HT_40_SS, 81500,
- 72200, 4, 4 }, /* 81 Mb */
- [47] = { RC_HT_S_40 , WLAN_RC_PHY_HT_40_SS, 108000,
- 92900, 5, 5 }, /* 108 Mb */
- [48] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 121500,
- 102700, 6, 6 }, /* 121.5 Mb */
- [49] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS, 135000,
- 112000, 7, 7 }, /* 135 Mb */
- [50] = { RC_HT_S_40, WLAN_RC_PHY_HT_40_SS_HGI, 150000,
- 122000, 7, 7 }, /* 150 Mb */
- [51] = { RC_INVALID, WLAN_RC_PHY_HT_40_DS, 27000,
- 25800, 8, 8 }, /* 27 Mb */
- [52] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 54000,
- 49800, 9, 9 }, /* 54 Mb */
- [53] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_DS, 81000,
- 71900, 10, 10 }, /* 81 Mb */
- [54] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 108000,
- 92500, 11, 11 }, /* 108 Mb */
- [55] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 162000,
- 130300, 12, 12 }, /* 162 Mb */
- [56] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 216000,
- 162800, 13, 13 }, /* 216 Mb */
- [57] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 243000,
- 178200, 14, 14 }, /* 243 Mb */
- [58] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS, 270000,
- 192100, 15, 15 }, /* 270 Mb */
- [59] = { RC_HT_DT_40, WLAN_RC_PHY_HT_40_DS_HGI, 300000,
- 207000, 15, 15 }, /* 300 Mb */
- [60] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 40500,
- 36100, 16, 16 }, /* 40.5 Mb */
- [61] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 81000,
- 72900, 17, 17 }, /* 81 Mb */
- [62] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 121500,
- 108300, 18, 18 }, /* 121.5 Mb */
- [63] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 162000,
- 142000, 19, 19 }, /* 162 Mb */
- [64] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS, 243000,
- 205100, 20, 20 }, /* 243 Mb */
- [65] = { RC_INVALID, WLAN_RC_PHY_HT_40_TS_HGI, 270000,
- 224700, 20, 20 }, /* 270 Mb */
- [66] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 324000,
- 263100, 21, 21 }, /* 324 Mb */
- [67] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 360000,
- 288000, 21, 21 }, /* 360 Mb */
- [68] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 364500,
- 290700, 22, 22 }, /* 364.5 Mb */
- [69] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 405000,
- 317200, 22, 22 }, /* 405 Mb */
- [70] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS, 405000,
- 317200, 23, 23 }, /* 405 Mb */
- [71] = { RC_HT_T_40, WLAN_RC_PHY_HT_40_TS_HGI, 450000,
- 346400, 23, 23 }, /* 450 Mb */
- },
- 50, /* probe interval */
- WLAN_RC_HT_FLAG, /* Phy rates allowed initially */
-};
-
-static const struct ath_rate_table ar5416_11a_ratetable = {
- 8,
- 0,
- {
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
- 5400, 0, 12},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
- 7800, 1, 18},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
- 10000, 2, 24},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
- 13900, 3, 36},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
- 17300, 4, 48},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
- 23000, 5, 72},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
- 27400, 6, 96},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
- 29300, 7, 108},
- },
- 50, /* probe interval */
- 0, /* Phy rates allowed initially */
-};
-
-static const struct ath_rate_table ar5416_11g_ratetable = {
- 12,
- 0,
- {
- { RC_L_SDT, WLAN_RC_PHY_CCK, 1000, /* 1 Mb */
- 900, 0, 2},
- { RC_L_SDT, WLAN_RC_PHY_CCK, 2000, /* 2 Mb */
- 1900, 1, 4},
- { RC_L_SDT, WLAN_RC_PHY_CCK, 5500, /* 5.5 Mb */
- 4900, 2, 11},
- { RC_L_SDT, WLAN_RC_PHY_CCK, 11000, /* 11 Mb */
- 8100, 3, 22},
- { RC_INVALID, WLAN_RC_PHY_OFDM, 6000, /* 6 Mb */
- 5400, 4, 12},
- { RC_INVALID, WLAN_RC_PHY_OFDM, 9000, /* 9 Mb */
- 7800, 5, 18},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 12000, /* 12 Mb */
- 10000, 6, 24},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 18000, /* 18 Mb */
- 13900, 7, 36},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 24000, /* 24 Mb */
- 17300, 8, 48},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 36000, /* 36 Mb */
- 23000, 9, 72},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 48000, /* 48 Mb */
- 27400, 10, 96},
- { RC_L_SDT, WLAN_RC_PHY_OFDM, 54000, /* 54 Mb */
- 29300, 11, 108},
- },
- 50, /* probe interval */
- 0, /* Phy rates allowed initially */
-};
-
-static int ath_rc_get_rateindex(struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_tx_rate *rate)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- int rix, i, idx = 0;
-
- if (!(rate->flags & IEEE80211_TX_RC_MCS))
- return rate->idx;
-
- for (i = 0; i < ath_rc_priv->max_valid_rate; i++) {
- idx = ath_rc_priv->valid_rate_index[i];
-
- if (WLAN_RC_PHY_HT(rate_table->info[idx].phy) &&
- rate_table->info[idx].ratecode == rate->idx)
- break;
- }
-
- rix = idx;
-
- if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
- rix++;
-
- return rix;
-}
-
-static void ath_rc_sort_validrates(struct ath_rate_priv *ath_rc_priv)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- u8 i, j, idx, idx_next;
-
- for (i = ath_rc_priv->max_valid_rate - 1; i > 0; i--) {
- for (j = 0; j <= i-1; j++) {
- idx = ath_rc_priv->valid_rate_index[j];
- idx_next = ath_rc_priv->valid_rate_index[j+1];
-
- if (rate_table->info[idx].ratekbps >
- rate_table->info[idx_next].ratekbps) {
- ath_rc_priv->valid_rate_index[j] = idx_next;
- ath_rc_priv->valid_rate_index[j+1] = idx;
- }
- }
- }
-}
-
-static inline
-int ath_rc_get_nextvalid_txrate(const struct ath_rate_table *rate_table,
- struct ath_rate_priv *ath_rc_priv,
- u8 cur_valid_txrate,
- u8 *next_idx)
-{
- u8 i;
-
- for (i = 0; i < ath_rc_priv->max_valid_rate - 1; i++) {
- if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
- *next_idx = ath_rc_priv->valid_rate_index[i+1];
- return 1;
- }
- }
-
- /* No more valid rates */
- *next_idx = 0;
-
- return 0;
-}
-
-/* Return true only for single stream */
-
-static int ath_rc_valid_phyrate(u32 phy, u32 capflag, int ignore_cw)
-{
- if (WLAN_RC_PHY_HT(phy) && !(capflag & WLAN_RC_HT_FLAG))
- return 0;
- if (WLAN_RC_PHY_DS(phy) && !(capflag & WLAN_RC_DS_FLAG))
- return 0;
- if (WLAN_RC_PHY_TS(phy) && !(capflag & WLAN_RC_TS_FLAG))
- return 0;
- if (WLAN_RC_PHY_SGI(phy) && !(capflag & WLAN_RC_SGI_FLAG))
- return 0;
- if (!ignore_cw && WLAN_RC_PHY_HT(phy))
- if (WLAN_RC_PHY_40(phy) && !(capflag & WLAN_RC_40_FLAG))
- return 0;
- return 1;
-}
-
-static inline int
-ath_rc_get_lower_rix(struct ath_rate_priv *ath_rc_priv,
- u8 cur_valid_txrate, u8 *next_idx)
-{
- int8_t i;
-
- for (i = 1; i < ath_rc_priv->max_valid_rate ; i++) {
- if (ath_rc_priv->valid_rate_index[i] == cur_valid_txrate) {
- *next_idx = ath_rc_priv->valid_rate_index[i-1];
- return 1;
- }
- }
-
- return 0;
-}
-
-static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- u8 i, hi = 0;
-
- for (i = 0; i < rate_table->rate_cnt; i++) {
- if (rate_table->info[i].rate_flags & RC_LEGACY) {
- u32 phy = rate_table->info[i].phy;
- u8 valid_rate_count = 0;
-
- if (!ath_rc_valid_phyrate(phy, ath_rc_priv->ht_cap, 0))
- continue;
-
- valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
-
- ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
- ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_priv->valid_rate_index[i] = true;
- hi = i;
- }
- }
-
- return hi;
-}
-
-static inline bool ath_rc_check_legacy(u8 rate, u8 dot11rate, u16 rate_flags,
- u32 phy, u32 capflag)
-{
- if (rate != dot11rate || WLAN_RC_PHY_HT(phy))
- return false;
-
- if ((rate_flags & WLAN_RC_CAP_MODE(capflag)) != WLAN_RC_CAP_MODE(capflag))
- return false;
-
- if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
- return false;
-
- return true;
-}
-
-static inline bool ath_rc_check_ht(u8 rate, u8 dot11rate, u16 rate_flags,
- u32 phy, u32 capflag)
-{
- if (rate != dot11rate || !WLAN_RC_PHY_HT(phy))
- return false;
-
- if (!WLAN_RC_PHY_HT_VALID(rate_flags, capflag))
- return false;
-
- if (!(rate_flags & WLAN_RC_CAP_STREAM(capflag)))
- return false;
-
- return true;
-}
-
-static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv, bool legacy)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- struct ath_rateset *rateset;
- u32 phy, capflag = ath_rc_priv->ht_cap;
- u16 rate_flags;
- u8 i, j, hi = 0, rate, dot11rate, valid_rate_count;
-
- if (legacy)
- rateset = &ath_rc_priv->neg_rates;
- else
- rateset = &ath_rc_priv->neg_ht_rates;
-
- for (i = 0; i < rateset->rs_nrates; i++) {
- for (j = 0; j < rate_table->rate_cnt; j++) {
- phy = rate_table->info[j].phy;
- rate_flags = rate_table->info[j].rate_flags;
- rate = rateset->rs_rates[i];
- dot11rate = rate_table->info[j].dot11rate;
-
- if (legacy &&
- !ath_rc_check_legacy(rate, dot11rate,
- rate_flags, phy, capflag))
- continue;
-
- if (!legacy &&
- !ath_rc_check_ht(rate, dot11rate,
- rate_flags, phy, capflag))
- continue;
-
- if (!ath_rc_valid_phyrate(phy, capflag, 0))
- continue;
-
- valid_rate_count = ath_rc_priv->valid_phy_ratecnt[phy];
- ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = j;
- ath_rc_priv->valid_phy_ratecnt[phy] += 1;
- ath_rc_priv->valid_rate_index[j] = true;
- hi = max(hi, j);
- }
- }
-
- return hi;
-}
-
-static u8 ath_rc_get_highest_rix(struct ath_rate_priv *ath_rc_priv,
- int *is_probing)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- u32 best_thruput, this_thruput, now_msec;
- u8 rate, next_rate, best_rate, maxindex, minindex;
- int8_t index = 0;
-
- now_msec = jiffies_to_msecs(jiffies);
- *is_probing = 0;
- best_thruput = 0;
- maxindex = ath_rc_priv->max_valid_rate-1;
- minindex = 0;
- best_rate = minindex;
-
- /*
- * Try the higher rate first. It will reduce memory moving time
- * if we have very good channel characteristics.
- */
- for (index = maxindex; index >= minindex ; index--) {
- u8 per_thres;
-
- rate = ath_rc_priv->valid_rate_index[index];
- if (rate > ath_rc_priv->rate_max_phy)
- continue;
-
- /*
- * For TCP the average collision rate is around 11%,
- * so we ignore PERs less than this. This is to
- * prevent the rate we are currently using (whose
- * PER might be in the 10-15 range because of TCP
- * collisions) looking worse than the next lower
- * rate whose PER has decayed close to 0. If we
-		 * used the next lower rate, its PER would grow to
-		 * 10-15 and we would be worse off than staying
- * at the current rate.
- */
- per_thres = ath_rc_priv->per[rate];
- if (per_thres < 12)
- per_thres = 12;
-
- this_thruput = rate_table->info[rate].user_ratekbps *
- (100 - per_thres);
-
- if (best_thruput <= this_thruput) {
- best_thruput = this_thruput;
- best_rate = rate;
- }
- }
-
- rate = best_rate;
-
- /*
- * Must check the actual rate (ratekbps) to account for
-	 * non-monotonicity of 11g's rate table
- */
-
- if (rate >= ath_rc_priv->rate_max_phy) {
- rate = ath_rc_priv->rate_max_phy;
-
- /* Probe the next allowed phy state */
- if (ath_rc_get_nextvalid_txrate(rate_table,
- ath_rc_priv, rate, &next_rate) &&
- (now_msec - ath_rc_priv->probe_time >
- rate_table->probe_interval) &&
- (ath_rc_priv->hw_maxretry_pktcnt >= 1)) {
- rate = next_rate;
- ath_rc_priv->probe_rate = rate;
- ath_rc_priv->probe_time = now_msec;
- ath_rc_priv->hw_maxretry_pktcnt = 0;
- *is_probing = 1;
- }
- }
-
- if (rate > (ath_rc_priv->rate_table_size - 1))
- rate = ath_rc_priv->rate_table_size - 1;
-
- if (RC_TS_ONLY(rate_table->info[rate].rate_flags) &&
- (ath_rc_priv->ht_cap & WLAN_RC_TS_FLAG))
- return rate;
-
- if (RC_DS_OR_LATER(rate_table->info[rate].rate_flags) &&
- (ath_rc_priv->ht_cap & (WLAN_RC_DS_FLAG | WLAN_RC_TS_FLAG)))
- return rate;
-
- if (RC_SS_OR_LEGACY(rate_table->info[rate].rate_flags))
- return rate;
-
- /* This should not happen */
- WARN_ON_ONCE(1);
-
- rate = ath_rc_priv->valid_rate_index[0];
-
- return rate;
-}
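
The loop in ath_rc_get_highest_rix() above picks the rate with the best expected throughput, ratekbps * (100 - PER), flooring PER at 12% so the rate currently in use is not penalized for ordinary TCP collision losses. A minimal standalone sketch of just that rule follows; the struct, values and helper name are illustrative, not the driver's.

/* Minimal sketch of the throughput-based pick in ath_rc_get_highest_rix():
 * expected throughput is the nominal rate scaled by (100 - PER), with PER
 * floored at 12%.  Types, names and numbers here are illustrative.
 */
#include <stdio.h>

struct rc_rate {
	unsigned int user_ratekbps;
	unsigned int per;		/* packet error rate, percent */
};

static int pick_best_rate(const struct rc_rate *rates, int n)
{
	unsigned int best_thruput = 0;
	int best = 0, i;

	/* walk from the fastest rate down, like the driver does */
	for (i = n - 1; i >= 0; i--) {
		unsigned int per = rates[i].per < 12 ? 12 : rates[i].per;
		unsigned int thruput = rates[i].user_ratekbps * (100 - per);

		if (thruput >= best_thruput) {
			best_thruput = thruput;
			best = i;
		}
	}
	return best;
}

int main(void)
{
	/* 36, 48 and 54 Mb/s with made-up PER values */
	struct rc_rate rates[] = { { 36000, 5 }, { 48000, 10 }, { 54000, 40 } };

	printf("best index: %d\n", pick_best_rate(rates, 3));	/* prints 1 */
	return 0;
}
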
-
-static void ath_rc_rate_set_series(const struct ath_rate_table *rate_table,
- struct ieee80211_tx_rate *rate,
- struct ieee80211_tx_rate_control *txrc,
- u8 tries, u8 rix, int rtsctsenable)
-{
- rate->count = tries;
- rate->idx = rate_table->info[rix].ratecode;
-
- if (txrc->rts || rtsctsenable)
- rate->flags |= IEEE80211_TX_RC_USE_RTS_CTS;
-
- if (WLAN_RC_PHY_HT(rate_table->info[rix].phy)) {
- rate->flags |= IEEE80211_TX_RC_MCS;
- if (WLAN_RC_PHY_40(rate_table->info[rix].phy) &&
- conf_is_ht40(&txrc->hw->conf))
- rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
- if (WLAN_RC_PHY_SGI(rate_table->info[rix].phy))
- rate->flags |= IEEE80211_TX_RC_SHORT_GI;
- }
-}
-
-static void ath_rc_rate_set_rtscts(struct ath_softc *sc,
- const struct ath_rate_table *rate_table,
- struct ieee80211_tx_info *tx_info)
-{
- struct ieee80211_bss_conf *bss_conf;
-
- if (!tx_info->control.vif)
- return;
- /*
- * For legacy frames, mac80211 takes care of CTS protection.
- */
- if (!(tx_info->control.rates[0].flags & IEEE80211_TX_RC_MCS))
- return;
-
- bss_conf = &tx_info->control.vif->bss_conf;
-
- if (!bss_conf->basic_rates)
- return;
-
- /*
- * For now, use the lowest allowed basic rate for HT frames.
- */
- tx_info->control.rts_cts_rate_idx = __ffs(bss_conf->basic_rates);
-}
-
-static void ath_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
- struct ieee80211_tx_rate_control *txrc)
-{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *ath_rc_priv = priv_sta;
- const struct ath_rate_table *rate_table;
- struct sk_buff *skb = txrc->skb;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rates = tx_info->control.rates;
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- __le16 fc = hdr->frame_control;
- u8 try_per_rate, i = 0, rix;
- int is_probe = 0;
-
- if (rate_control_send_low(sta, priv_sta, txrc))
- return;
-
- /*
- * For Multi Rate Retry we use a different number of
- * retry attempt counts. This ends up looking like this:
- *
- * MRR[0] = 4
- * MRR[1] = 4
- * MRR[2] = 4
- * MRR[3] = 8
- *
- */
- try_per_rate = 4;
-
- rate_table = ath_rc_priv->rate_table;
- rix = ath_rc_get_highest_rix(ath_rc_priv, &is_probe);
-
- if (conf_is_ht(&sc->hw->conf) &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_LDPC_CODING))
- tx_info->flags |= IEEE80211_TX_CTL_LDPC;
-
- if (conf_is_ht(&sc->hw->conf) &&
- (sta->ht_cap.cap & IEEE80211_HT_CAP_TX_STBC))
- tx_info->flags |= (1 << IEEE80211_TX_CTL_STBC_SHIFT);
-
- if (is_probe) {
- /*
- * Set one try for probe rates. For the
- * probes don't enable RTS.
- */
- ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- 1, rix, 0);
- /*
- * Get the next tried/allowed rate.
- * No RTS for the next series after the probe rate.
- */
- ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
- ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, rix, 0);
-
- tx_info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE;
- } else {
- /*
- * Set the chosen rate. No RTS for first series entry.
- */
- ath_rc_rate_set_series(rate_table, &rates[i++], txrc,
- try_per_rate, rix, 0);
- }
-
- for ( ; i < 4; i++) {
- /*
- * Use twice the number of tries for the last MRR segment.
- */
- if (i + 1 == 4)
- try_per_rate = 8;
-
- ath_rc_get_lower_rix(ath_rc_priv, rix, &rix);
-
- /*
- * All other rates in the series have RTS enabled.
- */
- ath_rc_rate_set_series(rate_table, &rates[i], txrc,
- try_per_rate, rix, 1);
- }
-
- /*
- * NB:Change rate series to enable aggregation when operating
- * at lower MCS rates. When first rate in series is MCS2
- * in HT40 @ 2.4GHz, series should look like:
- *
- * {MCS2, MCS1, MCS0, MCS0}.
- *
- * When first rate in series is MCS3 in HT20 @ 2.4GHz, series should
- * look like:
- *
- * {MCS3, MCS2, MCS1, MCS1}
- *
- * So, set fourth rate in series to be same as third one for
- * above conditions.
- */
- if ((sc->hw->conf.chandef.chan->band == IEEE80211_BAND_2GHZ) &&
- (conf_is_ht(&sc->hw->conf))) {
- u8 dot11rate = rate_table->info[rix].dot11rate;
- u8 phy = rate_table->info[rix].phy;
- if (i == 4 &&
- ((dot11rate == 2 && phy == WLAN_RC_PHY_HT_40_SS) ||
- (dot11rate == 3 && phy == WLAN_RC_PHY_HT_20_SS))) {
- rates[3].idx = rates[2].idx;
- rates[3].flags = rates[2].flags;
- }
- }
-
- /*
- * Force hardware to use computed duration for next
- * fragment by disabling multi-rate retry, which
- * updates duration based on the multi-rate duration table.
- *
- * FIXME: Fix duration
- */
- if (ieee80211_has_morefrags(fc) ||
- (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)) {
- rates[1].count = rates[2].count = rates[3].count = 0;
- rates[1].idx = rates[2].idx = rates[3].idx = 0;
- rates[0].count = ATH_TXMAXTRY;
- }
-
- ath_rc_rate_set_rtscts(sc, rate_table, tx_info);
-}
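
ath_get_rate() above builds a four-entry multi-rate-retry series with try counts {4, 4, 4, 8}, stepping down one valid rate per entry and, when probing, prepending a single-try probe entry. Below is a simplified standalone sketch of that fill order, leaving out the RTS flags and the 2.4 GHz aggregation tweak; the types and helper name are hypothetical.

/* Simplified fill of a 4-entry multi-rate-retry series, mirroring
 * ath_get_rate() above: tries are 4/4/4/8, each later entry steps one
 * valid rate down, and a probe (if any) takes slot 0 with one attempt.
 * struct mrr_entry and fill_series() are made-up names.
 */
#include <stdio.h>

struct mrr_entry {
	int rate_idx;
	int tries;
};

static void fill_series(struct mrr_entry s[4], int start_idx, int probing)
{
	int i = 0, rix = start_idx;

	if (probing) {
		s[i].rate_idx = rix;
		s[i].tries = 1;			/* single attempt for the probe */
		i++;
		if (rix > 0)
			rix--;			/* next entry falls back one rate */
	}

	for (; i < 4; i++) {
		s[i].rate_idx = rix;
		s[i].tries = (i == 3) ? 8 : 4;	/* last MRR segment gets 8 tries */
		if (rix > 0)
			rix--;
	}
}

int main(void)
{
	struct mrr_entry s[4];
	int i;

	fill_series(s, 5, 1);
	for (i = 0; i < 4; i++)
		printf("series[%d]: rate %d, tries %d\n", i, s[i].rate_idx, s[i].tries);
	return 0;
}
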
-
-static void ath_rc_update_per(struct ath_softc *sc,
- const struct ath_rate_table *rate_table,
- struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_tx_info *tx_info,
- int tx_rate, int xretries, int retries,
- u32 now_msec)
-{
- int count, n_bad_frames;
- u8 last_per;
- static const u32 nretry_to_per_lookup[10] = {
- 100 * 0 / 1,
- 100 * 1 / 4,
- 100 * 1 / 2,
- 100 * 3 / 4,
- 100 * 4 / 5,
- 100 * 5 / 6,
- 100 * 6 / 7,
- 100 * 7 / 8,
- 100 * 8 / 9,
- 100 * 9 / 10
- };
-
- last_per = ath_rc_priv->per[tx_rate];
- n_bad_frames = tx_info->status.ampdu_len - tx_info->status.ampdu_ack_len;
-
- if (xretries) {
- if (xretries == 1) {
- ath_rc_priv->per[tx_rate] += 30;
- if (ath_rc_priv->per[tx_rate] > 100)
- ath_rc_priv->per[tx_rate] = 100;
- } else {
- /* xretries == 2 */
- count = ARRAY_SIZE(nretry_to_per_lookup);
- if (retries >= count)
- retries = count - 1;
-
- /* new_PER = 7/8*old_PER + 1/8*(currentPER) */
- ath_rc_priv->per[tx_rate] =
- (u8)(last_per - (last_per >> 3) + (100 >> 3));
- }
-
- /* xretries == 1 or 2 */
-
- if (ath_rc_priv->probe_rate == tx_rate)
- ath_rc_priv->probe_rate = 0;
-
- } else { /* xretries == 0 */
- count = ARRAY_SIZE(nretry_to_per_lookup);
- if (retries >= count)
- retries = count - 1;
-
- if (n_bad_frames) {
- /* new_PER = 7/8*old_PER + 1/8*(currentPER)
- * Assuming that n_frames is not 0. The current PER
- * from the retries is 100 * retries / (retries+1),
- * since the first retries attempts failed, and the
- * next one worked. For the one that worked,
-			 * n_bad_frames subframes out of n_frames failed,
- * so the PER for that part is
- * 100 * n_bad_frames / n_frames, and it contributes
- * 100 * n_bad_frames / (n_frames * (retries+1)) to
- * the above PER. The expression below is a
- * simplified version of the sum of these two terms.
- */
- if (tx_info->status.ampdu_len > 0) {
- int n_frames, n_bad_tries;
- u8 cur_per, new_per;
-
- n_bad_tries = retries * tx_info->status.ampdu_len +
- n_bad_frames;
- n_frames = tx_info->status.ampdu_len * (retries + 1);
- cur_per = (100 * n_bad_tries / n_frames) >> 3;
- new_per = (u8)(last_per - (last_per >> 3) + cur_per);
- ath_rc_priv->per[tx_rate] = new_per;
- }
- } else {
- ath_rc_priv->per[tx_rate] =
- (u8)(last_per - (last_per >> 3) +
- (nretry_to_per_lookup[retries] >> 3));
- }
-
-
- /*
- * If we got at most one retry then increase the max rate if
- * this was a probe. Otherwise, ignore the probe.
- */
- if (ath_rc_priv->probe_rate && ath_rc_priv->probe_rate == tx_rate) {
- if (retries > 0 || 2 * n_bad_frames > tx_info->status.ampdu_len) {
- /*
- * Since we probed with just a single attempt,
- * any retries means the probe failed. Also,
- * if the attempt worked, but more than half
- * the subframes were bad then also consider
- * the probe a failure.
- */
- ath_rc_priv->probe_rate = 0;
- } else {
- u8 probe_rate = 0;
-
- ath_rc_priv->rate_max_phy =
- ath_rc_priv->probe_rate;
- probe_rate = ath_rc_priv->probe_rate;
-
- if (ath_rc_priv->per[probe_rate] > 30)
- ath_rc_priv->per[probe_rate] = 20;
-
- ath_rc_priv->probe_rate = 0;
-
- /*
- * Since this probe succeeded, we allow the next
- * probe twice as soon. This allows the maxRate
- * to move up faster if the probes are
- * successful.
- */
- ath_rc_priv->probe_time =
- now_msec - rate_table->probe_interval / 2;
- }
- }
-
- if (retries > 0) {
- /*
- * Don't update anything. We don't know if
- * this was because of collisions or poor signal.
- */
- ath_rc_priv->hw_maxretry_pktcnt = 0;
- } else {
- /*
- * It worked with no retries. First ignore bogus (small)
- * rssi_ack values.
- */
- if (tx_rate == ath_rc_priv->rate_max_phy &&
- ath_rc_priv->hw_maxretry_pktcnt < 255) {
- ath_rc_priv->hw_maxretry_pktcnt++;
- }
-
- }
- }
-}
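
The PER bookkeeping in ath_rc_update_per() above is an exponentially weighted moving average, new_PER = 7/8 * old_PER + 1/8 * current_PER, where for an aggregate the current sample folds the failed retries and the bad subframes of the final attempt into 100 * n_bad_tries / n_frames. A standalone sketch of that arithmetic with made-up inputs:

/* Sketch of the EWMA PER update done in ath_rc_update_per() above for a
 * completed aggregate: the current sample is
 *	100 * (retries * ampdu_len + n_bad_frames) / (ampdu_len * (retries + 1))
 * and it is mixed in with weight 1/8:  new = old - old/8 + current/8.
 * Input values below are made up.
 */
#include <stdio.h>

static int update_per(int old_per, int ampdu_len, int ampdu_acked, int retries)
{
	int n_bad_frames = ampdu_len - ampdu_acked;
	int n_bad_tries = retries * ampdu_len + n_bad_frames;
	int n_frames = ampdu_len * (retries + 1);
	int cur_per = (100 * n_bad_tries / n_frames) >> 3;	/* already /8 */

	return old_per - (old_per >> 3) + cur_per;
}

int main(void)
{
	/* 32-subframe aggregate, 28 acked, one retry, previous PER 24% */
	printf("new PER: %d%%\n", update_per(24, 32, 28, 1));	/* prints 28% */
	return 0;
}
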
-
-static void ath_rc_update_ht(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv,
- struct ieee80211_tx_info *tx_info,
- int tx_rate, int xretries, int retries)
-{
- u32 now_msec = jiffies_to_msecs(jiffies);
- int rate;
- u8 last_per;
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- int size = ath_rc_priv->rate_table_size;
-
- if ((tx_rate < 0) || (tx_rate > rate_table->rate_cnt))
- return;
-
- last_per = ath_rc_priv->per[tx_rate];
-
- /* Update PER first */
- ath_rc_update_per(sc, rate_table, ath_rc_priv,
- tx_info, tx_rate, xretries,
- retries, now_msec);
-
- /*
- * If this rate looks bad (high PER) then stop using it for
- * a while (except if we are probing).
- */
- if (ath_rc_priv->per[tx_rate] >= 55 && tx_rate > 0 &&
- rate_table->info[tx_rate].ratekbps <=
- rate_table->info[ath_rc_priv->rate_max_phy].ratekbps) {
- ath_rc_get_lower_rix(ath_rc_priv, (u8)tx_rate,
- &ath_rc_priv->rate_max_phy);
-
- /* Don't probe for a little while. */
- ath_rc_priv->probe_time = now_msec;
- }
-
- /* Make sure the rates below this have lower PER */
- /* Monotonicity is kept only for rates below the current rate. */
- if (ath_rc_priv->per[tx_rate] < last_per) {
- for (rate = tx_rate - 1; rate >= 0; rate--) {
-
- if (ath_rc_priv->per[rate] >
- ath_rc_priv->per[rate+1]) {
- ath_rc_priv->per[rate] =
- ath_rc_priv->per[rate+1];
- }
- }
- }
-
- /* Maintain monotonicity for rates above the current rate */
- for (rate = tx_rate; rate < size - 1; rate++) {
- if (ath_rc_priv->per[rate+1] <
- ath_rc_priv->per[rate])
- ath_rc_priv->per[rate+1] =
- ath_rc_priv->per[rate];
- }
-
- /* Every so often, we reduce the thresholds
- * and PER (different for CCK and OFDM). */
- if (now_msec - ath_rc_priv->per_down_time >=
- rate_table->probe_interval) {
- for (rate = 0; rate < size; rate++) {
- ath_rc_priv->per[rate] =
- 7 * ath_rc_priv->per[rate] / 8;
- }
-
- ath_rc_priv->per_down_time = now_msec;
- }
-
- ath_debug_stat_retries(ath_rc_priv, tx_rate, xretries, retries,
- ath_rc_priv->per[tx_rate]);
-
-}
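
Two invariants maintained by ath_rc_update_ht() above are easy to miss in the loops: PER is kept monotonic across the speed-sorted rate indices, and every probe interval all PER values decay by a factor of 7/8. A small illustrative sketch follows; in the driver the downward fix-up only runs when the PER of the transmit rate just improved, which the sketch omits.

/* Sketch of the PER monotonicity fix-up and the periodic 7/8 decay from
 * ath_rc_update_ht() above.  Rates are assumed sorted by increasing speed;
 * N_RATES and the sample values are made up.
 */
#include <stdio.h>

#define N_RATES 6

static void enforce_monotonic(int per[N_RATES], int tx_rate)
{
	int r;

	/* rates below the transmit rate must not report a higher PER */
	for (r = tx_rate - 1; r >= 0; r--)
		if (per[r] > per[r + 1])
			per[r] = per[r + 1];

	/* rates above the transmit rate must not report a lower PER */
	for (r = tx_rate; r < N_RATES - 1; r++)
		if (per[r + 1] < per[r])
			per[r + 1] = per[r];
}

static void decay_per(int per[N_RATES])
{
	int r;

	for (r = 0; r < N_RATES; r++)
		per[r] = 7 * per[r] / 8;	/* periodic forgiveness */
}

int main(void)
{
	int per[N_RATES] = { 5, 30, 10, 20, 15, 80 };
	int r;

	enforce_monotonic(per, 2);
	decay_per(per);
	for (r = 0; r < N_RATES; r++)
		printf("rate %d: PER %d%%\n", r, per[r]);
	return 0;
}
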
-
-static void ath_rc_tx_status(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv,
- struct sk_buff *skb)
-{
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_tx_rate *rates = tx_info->status.rates;
- struct ieee80211_tx_rate *rate;
- int final_ts_idx = 0, xretries = 0, long_retry = 0;
- u8 flags;
- u32 i = 0, rix;
-
- for (i = 0; i < sc->hw->max_rates; i++) {
- rate = &tx_info->status.rates[i];
- if (rate->idx < 0 || !rate->count)
- break;
-
- final_ts_idx = i;
- long_retry = rate->count - 1;
- }
-
- if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
- xretries = 1;
-
- /*
- * If the first rate is not the final index, there
- * are intermediate rate failures to be processed.
- */
- if (final_ts_idx != 0) {
- for (i = 0; i < final_ts_idx ; i++) {
- if (rates[i].count != 0 && (rates[i].idx >= 0)) {
- flags = rates[i].flags;
-
- /* If HT40 and we have switched mode from
- * 40 to 20 => don't update */
-
- if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
- !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
- return;
-
- rix = ath_rc_get_rateindex(ath_rc_priv, &rates[i]);
- ath_rc_update_ht(sc, ath_rc_priv, tx_info,
- rix, xretries ? 1 : 2,
- rates[i].count);
- }
- }
- }
-
- flags = rates[final_ts_idx].flags;
-
- /* If HT40 and we have switched mode from 40 to 20 => don't update */
- if ((flags & IEEE80211_TX_RC_40_MHZ_WIDTH) &&
- !(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG))
- return;
-
- rix = ath_rc_get_rateindex(ath_rc_priv, &rates[final_ts_idx]);
- ath_rc_update_ht(sc, ath_rc_priv, tx_info, rix, xretries, long_retry);
- ath_debug_stat_rc(ath_rc_priv, rix);
-}
-
-static const
-struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
- enum ieee80211_band band,
- bool is_ht)
-{
- switch(band) {
- case IEEE80211_BAND_2GHZ:
- if (is_ht)
- return &ar5416_11ng_ratetable;
- return &ar5416_11g_ratetable;
- case IEEE80211_BAND_5GHZ:
- if (is_ht)
- return &ar5416_11na_ratetable;
- return &ar5416_11a_ratetable;
- default:
- return NULL;
- }
-}
-
-static void ath_rc_init(struct ath_softc *sc,
- struct ath_rate_priv *ath_rc_priv)
-{
- const struct ath_rate_table *rate_table = ath_rc_priv->rate_table;
- struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- u8 i, j, k, hi = 0, hthi = 0;
-
- ath_rc_priv->rate_table_size = RATE_TABLE_SIZE;
-
- for (i = 0 ; i < ath_rc_priv->rate_table_size; i++) {
- ath_rc_priv->per[i] = 0;
- ath_rc_priv->valid_rate_index[i] = 0;
- }
-
- for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
- for (j = 0; j < RATE_TABLE_SIZE; j++)
- ath_rc_priv->valid_phy_rateidx[i][j] = 0;
- ath_rc_priv->valid_phy_ratecnt[i] = 0;
- }
-
- if (!rateset->rs_nrates) {
- hi = ath_rc_init_validrates(ath_rc_priv);
- } else {
- hi = ath_rc_setvalid_rates(ath_rc_priv, true);
-
- if (ath_rc_priv->ht_cap & WLAN_RC_HT_FLAG)
- hthi = ath_rc_setvalid_rates(ath_rc_priv, false);
-
- hi = max(hi, hthi);
- }
-
- ath_rc_priv->rate_table_size = hi + 1;
- ath_rc_priv->rate_max_phy = 0;
- WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
-
- for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
- for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
- ath_rc_priv->valid_rate_index[k++] =
- ath_rc_priv->valid_phy_rateidx[i][j];
- }
-
- if (!ath_rc_valid_phyrate(i, rate_table->initial_ratemax, 1) ||
- !ath_rc_priv->valid_phy_ratecnt[i])
- continue;
-
- ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
- }
- WARN_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
- WARN_ON(k > RATE_TABLE_SIZE);
-
- ath_rc_priv->max_valid_rate = k;
- ath_rc_sort_validrates(ath_rc_priv);
- ath_rc_priv->rate_max_phy = (k > 4) ?
- ath_rc_priv->valid_rate_index[k-4] :
- ath_rc_priv->valid_rate_index[k-1];
-
- ath_dbg(common, CONFIG, "RC Initialized with capabilities: 0x%x\n",
- ath_rc_priv->ht_cap);
-}
-
-static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta)
-{
- u8 caps = 0;
-
- if (sta->ht_cap.ht_supported) {
- caps = WLAN_RC_HT_FLAG;
- if (sta->ht_cap.mcs.rx_mask[1] && sta->ht_cap.mcs.rx_mask[2])
- caps |= WLAN_RC_TS_FLAG | WLAN_RC_DS_FLAG;
- else if (sta->ht_cap.mcs.rx_mask[1])
- caps |= WLAN_RC_DS_FLAG;
- if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
- caps |= WLAN_RC_40_FLAG;
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40)
- caps |= WLAN_RC_SGI_FLAG;
- } else {
- if (sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20)
- caps |= WLAN_RC_SGI_FLAG;
- }
- }
-
- return caps;
-}
-
-static bool ath_tx_aggr_check(struct ath_softc *sc, struct ieee80211_sta *sta,
- u8 tidno)
-{
- struct ath_node *an = (struct ath_node *)sta->drv_priv;
- struct ath_atx_tid *txtid;
-
- if (!sta->ht_cap.ht_supported)
- return false;
-
- txtid = ATH_AN_2_TID(an, tidno);
- return !txtid->active;
-}
-
-
-/***********************************/
-/* mac80211 Rate Control callbacks */
-/***********************************/
-
-static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct sk_buff *skb)
-{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *ath_rc_priv = priv_sta;
- struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
- struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
- __le16 fc = hdr->frame_control;
-
- if (!priv_sta || !ieee80211_is_data(fc))
- return;
-
- /* This packet was aggregated but doesn't carry status info */
- if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) &&
- !(tx_info->flags & IEEE80211_TX_STAT_AMPDU))
- return;
-
- if (tx_info->flags & IEEE80211_TX_STAT_TX_FILTERED)
- return;
-
- ath_rc_tx_status(sc, ath_rc_priv, skb);
-
- /* Check if aggregation has to be enabled for this tid */
- if (conf_is_ht(&sc->hw->conf) &&
- !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
- if (ieee80211_is_data_qos(fc) &&
- skb_get_queue_mapping(skb) != IEEE80211_AC_VO) {
- u8 *qc, tid;
-
- qc = ieee80211_get_qos_ctl(hdr);
- tid = qc[0] & 0xf;
-
- if(ath_tx_aggr_check(sc, sta, tid))
- ieee80211_start_tx_ba_session(sta, tid, 0);
- }
- }
-}
-
-static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *priv_sta)
-{
- struct ath_softc *sc = priv;
- struct ath_common *common = ath9k_hw_common(sc->sc_ah);
- struct ath_rate_priv *ath_rc_priv = priv_sta;
- int i, j = 0;
- u32 rate_flags = ieee80211_chandef_rate_flags(&sc->hw->conf.chandef);
-
- for (i = 0; i < sband->n_bitrates; i++) {
- if (sta->supp_rates[sband->band] & BIT(i)) {
- if ((rate_flags & sband->bitrates[i].flags)
- != rate_flags)
- continue;
-
- ath_rc_priv->neg_rates.rs_rates[j]
- = (sband->bitrates[i].bitrate * 2) / 10;
- j++;
- }
- }
- ath_rc_priv->neg_rates.rs_nrates = j;
-
- if (sta->ht_cap.ht_supported) {
- for (i = 0, j = 0; i < 77; i++) {
- if (sta->ht_cap.mcs.rx_mask[i/8] & (1<<(i%8)))
- ath_rc_priv->neg_ht_rates.rs_rates[j++] = i;
- if (j == ATH_RATE_MAX)
- break;
- }
- ath_rc_priv->neg_ht_rates.rs_nrates = j;
- }
-
- ath_rc_priv->rate_table = ath_choose_rate_table(sc, sband->band,
- sta->ht_cap.ht_supported);
- if (!ath_rc_priv->rate_table) {
- ath_err(common, "No rate table chosen\n");
- return;
- }
-
- ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
- ath_rc_init(sc, priv_sta);
-}
-
-static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
- struct cfg80211_chan_def *chandef,
- struct ieee80211_sta *sta, void *priv_sta,
- u32 changed)
-{
- struct ath_softc *sc = priv;
- struct ath_rate_priv *ath_rc_priv = priv_sta;
-
- if (changed & IEEE80211_RC_BW_CHANGED) {
- ath_rc_priv->ht_cap = ath_rc_build_ht_caps(sc, sta);
- ath_rc_init(sc, priv_sta);
-
- ath_dbg(ath9k_hw_common(sc->sc_ah), CONFIG,
- "Operating Bandwidth changed to: %d\n",
- sc->hw->conf.chandef.width);
- }
-}
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
-
-void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
- struct ath_rc_stats *stats;
-
- stats = &rc->rcstats[final_rate];
- stats->success++;
-}
-
-void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
- int xretries, int retries, u8 per)
-{
- struct ath_rc_stats *stats = &rc->rcstats[rix];
-
- stats->xretries += xretries;
- stats->retries += retries;
- stats->per = per;
-}
-
-static ssize_t read_file_rcstat(struct file *file, char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct ath_rate_priv *rc = file->private_data;
- char *buf;
- unsigned int len = 0, max;
- int rix;
- ssize_t retval;
-
- if (rc->rate_table == NULL)
- return 0;
-
- max = 80 + rc->rate_table_size * 1024 + 1;
- buf = kmalloc(max, GFP_KERNEL);
- if (buf == NULL)
- return -ENOMEM;
-
- len += sprintf(buf, "%6s %6s %6s "
- "%10s %10s %10s %10s\n",
- "HT", "MCS", "Rate",
- "Success", "Retries", "XRetries", "PER");
-
- for (rix = 0; rix < rc->max_valid_rate; rix++) {
- u8 i = rc->valid_rate_index[rix];
- u32 ratekbps = rc->rate_table->info[i].ratekbps;
- struct ath_rc_stats *stats = &rc->rcstats[i];
- char mcs[5];
- char htmode[5];
- int used_mcs = 0, used_htmode = 0;
-
- if (WLAN_RC_PHY_HT(rc->rate_table->info[i].phy)) {
- used_mcs = scnprintf(mcs, 5, "%d",
- rc->rate_table->info[i].ratecode);
-
- if (WLAN_RC_PHY_40(rc->rate_table->info[i].phy))
- used_htmode = scnprintf(htmode, 5, "HT40");
- else if (WLAN_RC_PHY_20(rc->rate_table->info[i].phy))
- used_htmode = scnprintf(htmode, 5, "HT20");
- else
- used_htmode = scnprintf(htmode, 5, "????");
- }
-
- mcs[used_mcs] = '\0';
- htmode[used_htmode] = '\0';
-
- len += scnprintf(buf + len, max - len,
- "%6s %6s %3u.%d: "
- "%10u %10u %10u %10u\n",
- htmode,
- mcs,
- ratekbps / 1000,
- (ratekbps % 1000) / 100,
- stats->success,
- stats->retries,
- stats->xretries,
- stats->per);
- }
-
- if (len > max)
- len = max;
-
- retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
- kfree(buf);
- return retval;
-}
-
-static const struct file_operations fops_rcstat = {
- .read = read_file_rcstat,
- .open = simple_open,
- .owner = THIS_MODULE
-};
-
-static void ath_rate_add_sta_debugfs(void *priv, void *priv_sta,
- struct dentry *dir)
-{
- struct ath_rate_priv *rc = priv_sta;
- rc->debugfs_rcstats = debugfs_create_file("rc_stats", S_IRUGO,
- dir, rc, &fops_rcstat);
-}
-
-static void ath_rate_remove_sta_debugfs(void *priv, void *priv_sta)
-{
- struct ath_rate_priv *rc = priv_sta;
- debugfs_remove(rc->debugfs_rcstats);
-}
-
-#endif /* CONFIG_MAC80211_DEBUGFS && CONFIG_ATH9K_DEBUGFS */
-
-static void *ath_rate_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
-{
- return hw->priv;
-}
-
-static void ath_rate_free(void *priv)
-{
- return;
-}
-
-static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp)
-{
- return kzalloc(sizeof(struct ath_rate_priv), gfp);
-}
-
-static void ath_rate_free_sta(void *priv, struct ieee80211_sta *sta,
- void *priv_sta)
-{
- struct ath_rate_priv *rate_priv = priv_sta;
- kfree(rate_priv);
-}
-
-static struct rate_control_ops ath_rate_ops = {
- .module = NULL,
- .name = "ath9k_rate_control",
- .tx_status = ath_tx_status,
- .get_rate = ath_get_rate,
- .rate_init = ath_rate_init,
- .rate_update = ath_rate_update,
- .alloc = ath_rate_alloc,
- .free = ath_rate_free,
- .alloc_sta = ath_rate_alloc_sta,
- .free_sta = ath_rate_free_sta,
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
- .add_sta_debugfs = ath_rate_add_sta_debugfs,
- .remove_sta_debugfs = ath_rate_remove_sta_debugfs,
-#endif
-};
-
-int ath_rate_control_register(void)
-{
- return ieee80211_rate_control_register(&ath_rate_ops);
-}
-
-void ath_rate_control_unregister(void)
-{
- ieee80211_rate_control_unregister(&ath_rate_ops);
-}
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
deleted file mode 100644
index b9a8738..0000000
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ /dev/null
@@ -1,248 +0,0 @@
-/*
- * Copyright (c) 2004 Sam Leffler, Errno Consulting
- * Copyright (c) 2004 Video54 Technologies, Inc.
- * Copyright (c) 2008-2011 Atheros Communications Inc.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef RC_H
-#define RC_H
-
-#include "hw.h"
-
-struct ath_softc;
-
-#define ATH_RATE_MAX 30
-#define RATE_TABLE_SIZE 72
-
-#define RC_INVALID 0x0000
-#define RC_LEGACY 0x0001
-#define RC_SS 0x0002
-#define RC_DS 0x0004
-#define RC_TS 0x0008
-#define RC_HT_20 0x0010
-#define RC_HT_40 0x0020
-
-#define RC_STREAM_MASK 0xe
-#define RC_DS_OR_LATER(f) ((((f) & RC_STREAM_MASK) == RC_DS) || \
- (((f) & RC_STREAM_MASK) == (RC_DS | RC_TS)))
-#define RC_TS_ONLY(f) (((f) & RC_STREAM_MASK) == RC_TS)
-#define RC_SS_OR_LEGACY(f) ((f) & (RC_SS | RC_LEGACY))
-
-#define RC_HT_2040 (RC_HT_20 | RC_HT_40)
-#define RC_ALL_STREAM (RC_SS | RC_DS | RC_TS)
-#define RC_L_SD (RC_LEGACY | RC_SS | RC_DS)
-#define RC_L_SDT (RC_LEGACY | RC_SS | RC_DS | RC_TS)
-#define RC_HT_S_20 (RC_HT_20 | RC_SS)
-#define RC_HT_D_20 (RC_HT_20 | RC_DS)
-#define RC_HT_T_20 (RC_HT_20 | RC_TS)
-#define RC_HT_S_40 (RC_HT_40 | RC_SS)
-#define RC_HT_D_40 (RC_HT_40 | RC_DS)
-#define RC_HT_T_40 (RC_HT_40 | RC_TS)
-
-#define RC_HT_SD_20 (RC_HT_20 | RC_SS | RC_DS)
-#define RC_HT_DT_20 (RC_HT_20 | RC_DS | RC_TS)
-#define RC_HT_SD_40 (RC_HT_40 | RC_SS | RC_DS)
-#define RC_HT_DT_40 (RC_HT_40 | RC_DS | RC_TS)
-
-#define RC_HT_SD_2040 (RC_HT_2040 | RC_SS | RC_DS)
-#define RC_HT_SDT_2040 (RC_HT_2040 | RC_SS | RC_DS | RC_TS)
-
-#define RC_HT_SDT_20 (RC_HT_20 | RC_SS | RC_DS | RC_TS)
-#define RC_HT_SDT_40 (RC_HT_40 | RC_SS | RC_DS | RC_TS)
-
-#define RC_ALL (RC_LEGACY | RC_HT_2040 | RC_ALL_STREAM)
-
-enum {
- WLAN_RC_PHY_OFDM,
- WLAN_RC_PHY_CCK,
- WLAN_RC_PHY_HT_20_SS,
- WLAN_RC_PHY_HT_20_DS,
- WLAN_RC_PHY_HT_20_TS,
- WLAN_RC_PHY_HT_40_SS,
- WLAN_RC_PHY_HT_40_DS,
- WLAN_RC_PHY_HT_40_TS,
- WLAN_RC_PHY_HT_20_SS_HGI,
- WLAN_RC_PHY_HT_20_DS_HGI,
- WLAN_RC_PHY_HT_20_TS_HGI,
- WLAN_RC_PHY_HT_40_SS_HGI,
- WLAN_RC_PHY_HT_40_DS_HGI,
- WLAN_RC_PHY_HT_40_TS_HGI,
- WLAN_RC_PHY_MAX
-};
-
-#define WLAN_RC_PHY_DS(_phy) ((_phy == WLAN_RC_PHY_HT_20_DS) \
- || (_phy == WLAN_RC_PHY_HT_40_DS) \
- || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_DS_HGI))
-#define WLAN_RC_PHY_TS(_phy) ((_phy == WLAN_RC_PHY_HT_20_TS) \
- || (_phy == WLAN_RC_PHY_HT_40_TS) \
- || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-#define WLAN_RC_PHY_20(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS) \
- || (_phy == WLAN_RC_PHY_HT_20_DS) \
- || (_phy == WLAN_RC_PHY_HT_20_TS) \
- || (_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_TS_HGI))
-#define WLAN_RC_PHY_40(_phy) ((_phy == WLAN_RC_PHY_HT_40_SS) \
- || (_phy == WLAN_RC_PHY_HT_40_DS) \
- || (_phy == WLAN_RC_PHY_HT_40_TS) \
- || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-#define WLAN_RC_PHY_SGI(_phy) ((_phy == WLAN_RC_PHY_HT_20_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_20_TS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_SS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_DS_HGI) \
- || (_phy == WLAN_RC_PHY_HT_40_TS_HGI))
-
-#define WLAN_RC_PHY_HT(_phy) (_phy >= WLAN_RC_PHY_HT_20_SS)
-
-#define WLAN_RC_CAP_MODE(capflag) (((capflag & WLAN_RC_HT_FLAG) ? \
- ((capflag & WLAN_RC_40_FLAG) ? RC_HT_40 : RC_HT_20) : RC_LEGACY))
-
-#define WLAN_RC_CAP_STREAM(capflag) (((capflag & WLAN_RC_TS_FLAG) ? \
- (RC_TS) : ((capflag & WLAN_RC_DS_FLAG) ? RC_DS : RC_SS)))
-
-/* Return TRUE if flag supports HT20 && client supports HT20 or
- * return TRUE if flag supports HT40 && client supports HT40.
- * This is used because some rates overlap between HT20/HT40.
- */
-#define WLAN_RC_PHY_HT_VALID(flag, capflag) \
- (((flag & RC_HT_20) && !(capflag & WLAN_RC_40_FLAG)) || \
- ((flag & RC_HT_40) && (capflag & WLAN_RC_40_FLAG)))
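
A quick standalone check of the rule encoded by WLAN_RC_PHY_HT_VALID() above: a rate flagged for both widths is accepted regardless of the client's width, while a width-specific rate needs a matching client. The flag values are copied from this header; ht_valid() is a made-up name.

/* Re-statement of WLAN_RC_PHY_HT_VALID() with the same bit values as rc.h;
 * ht_valid() is a hypothetical name used only for this check.
 */
#include <stdio.h>

#define RC_HT_20	0x0010
#define RC_HT_40	0x0020
#define RC_HT_2040	(RC_HT_20 | RC_HT_40)
#define WLAN_RC_40_FLAG	0x04

static int ht_valid(unsigned int rate_flags, unsigned int capflag)
{
	return ((rate_flags & RC_HT_20) && !(capflag & WLAN_RC_40_FLAG)) ||
	       ((rate_flags & RC_HT_40) &&  (capflag & WLAN_RC_40_FLAG));
}

int main(void)
{
	/* HT20-only client: dual-width rate accepted, HT40-only rate rejected */
	printf("%d %d\n", ht_valid(RC_HT_2040, 0), ht_valid(RC_HT_40, 0));
	/* HT40 client: both accepted */
	printf("%d %d\n", ht_valid(RC_HT_2040, WLAN_RC_40_FLAG),
			  ht_valid(RC_HT_40, WLAN_RC_40_FLAG));
	return 0;
}
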
-
-#define WLAN_RC_DS_FLAG (0x01)
-#define WLAN_RC_TS_FLAG (0x02)
-#define WLAN_RC_40_FLAG (0x04)
-#define WLAN_RC_SGI_FLAG (0x08)
-#define WLAN_RC_HT_FLAG (0x10)
-
-/**
- * struct ath_rate_table - Rate Control table
- * @rate_cnt: total number of rates for the given wireless mode
- * @mcs_start: MCS rate index offset
- * @rate_flags: Rate Control flags
- * @phy: CCK/OFDM/HT20/HT40
- * @ratekbps: rate in Kbits per second
- * @user_ratekbps: user rate in Kbits per second
- * @ratecode: rate that goes into HW descriptors
- * @dot11rate: value that goes into supported
- * rates info element of MLME
- * @ctrl_rate: Index of next lower basic rate, used for duration computation
- * @cw40index: Index of rates having 40MHz channel width
- * @sgi_index: Index of rates having Short Guard Interval
- * @ht_index: high throughput rates having 40MHz channel width and
- * Short Guard Interval
- * @probe_interval: interval for rate control to probe for other rates
- * @initial_ratemax: initial ratemax value
- */
-struct ath_rate_table {
- int rate_cnt;
- int mcs_start;
- struct {
- u16 rate_flags;
- u8 phy;
- u32 ratekbps;
- u32 user_ratekbps;
- u8 ratecode;
- u8 dot11rate;
- } info[RATE_TABLE_SIZE];
- u32 probe_interval;
- u8 initial_ratemax;
-};
-
-struct ath_rateset {
- u8 rs_nrates;
- u8 rs_rates[ATH_RATE_MAX];
-};
-
-struct ath_rc_stats {
- u32 success;
- u32 retries;
- u32 xretries;
- u8 per;
-};
-
-/**
- * struct ath_rate_priv - Rate Control priv data
- * @state: RC state
- * @probe_rate: rate we are probing at
- * @probe_time: msec timestamp for last probe
- * @hw_maxretry_pktcnt: num of packets since we got HW max retry error
- * @max_valid_rate: maximum number of valid rates
- * @per_down_time: msec timestamp for last PER down step
- * @valid_phy_ratecnt: valid rate count
- * @rate_max_phy: phy index for the max rate
- * @per: PER for every valid rate in %
- * @probe_interval: interval for ratectrl to probe for other rates
- * @ht_cap: HT capabilities
- * @neg_rates: Negotiated rates
- * @neg_ht_rates: Negotiated HT rates
- */
-struct ath_rate_priv {
- u8 rate_table_size;
- u8 probe_rate;
- u8 hw_maxretry_pktcnt;
- u8 max_valid_rate;
- u8 valid_rate_index[RATE_TABLE_SIZE];
- u8 ht_cap;
- u8 valid_phy_ratecnt[WLAN_RC_PHY_MAX];
- u8 valid_phy_rateidx[WLAN_RC_PHY_MAX][RATE_TABLE_SIZE];
- u8 rate_max_phy;
- u8 per[RATE_TABLE_SIZE];
- u32 probe_time;
- u32 per_down_time;
- u32 probe_interval;
- struct ath_rateset neg_rates;
- struct ath_rateset neg_ht_rates;
- const struct ath_rate_table *rate_table;
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
- struct dentry *debugfs_rcstats;
- struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
-#endif
-};
-
-#if defined(CONFIG_MAC80211_DEBUGFS) && defined(CONFIG_ATH9K_DEBUGFS)
-void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate);
-void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
- int xretries, int retries, u8 per);
-#else
-static inline void ath_debug_stat_rc(struct ath_rate_priv *rc, int final_rate)
-{
-}
-static inline void ath_debug_stat_retries(struct ath_rate_priv *rc, int rix,
- int xretries, int retries, u8 per)
-{
-}
-#endif
-
-#ifdef CONFIG_ATH9K_LEGACY_RATE_CONTROL
-int ath_rate_control_register(void);
-void ath_rate_control_unregister(void);
-#else
-static inline int ath_rate_control_register(void)
-{
- return 0;
-}
-
-static inline void ath_rate_control_unregister(void)
-{
-}
-#endif
-
-#endif /* RC_H */
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index 82e340d..425e34f 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -762,204 +762,6 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
return bf;
}
-/* Assumes you've already done the endian to CPU conversion */
-static bool ath9k_rx_accept(struct ath_common *common,
- struct ieee80211_hdr *hdr,
- struct ieee80211_rx_status *rxs,
- struct ath_rx_status *rx_stats,
- bool *decrypt_error)
-{
- struct ath_softc *sc = (struct ath_softc *) common->priv;
- bool is_mc, is_valid_tkip, strip_mic, mic_error;
- struct ath_hw *ah = common->ah;
- __le16 fc;
-
- fc = hdr->frame_control;
-
- is_mc = !!is_multicast_ether_addr(hdr->addr1);
- is_valid_tkip = rx_stats->rs_keyix != ATH9K_RXKEYIX_INVALID &&
- test_bit(rx_stats->rs_keyix, common->tkip_keymap);
- strip_mic = is_valid_tkip && ieee80211_is_data(fc) &&
- ieee80211_has_protected(fc) &&
- !(rx_stats->rs_status &
- (ATH9K_RXERR_DECRYPT | ATH9K_RXERR_CRC | ATH9K_RXERR_MIC |
- ATH9K_RXERR_KEYMISS));
-
- /*
- * Key miss events are only relevant for pairwise keys where the
- * descriptor does contain a valid key index. This has been observed
- * mostly with CCMP encryption.
- */
- if (rx_stats->rs_keyix == ATH9K_RXKEYIX_INVALID ||
- !test_bit(rx_stats->rs_keyix, common->ccmp_keymap))
- rx_stats->rs_status &= ~ATH9K_RXERR_KEYMISS;
-
- mic_error = is_valid_tkip && !ieee80211_is_ctl(fc) &&
- !ieee80211_has_morefrags(fc) &&
- !(le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG) &&
- (rx_stats->rs_status & ATH9K_RXERR_MIC);
-
- /*
- * The rx_stats->rs_status will not be set until the end of the
- * chained descriptors so it can be ignored if rs_more is set. The
- * rs_more will be false at the last element of the chained
- * descriptors.
- */
- if (rx_stats->rs_status != 0) {
- u8 status_mask;
-
- if (rx_stats->rs_status & ATH9K_RXERR_CRC) {
- rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
- mic_error = false;
- }
-
- if ((rx_stats->rs_status & ATH9K_RXERR_DECRYPT) ||
- (!is_mc && (rx_stats->rs_status & ATH9K_RXERR_KEYMISS))) {
- *decrypt_error = true;
- mic_error = false;
- }
-
- /*
- * Reject error frames with the exception of
- * decryption and MIC failures. For monitor mode,
- * we also ignore the CRC error.
- */
- status_mask = ATH9K_RXERR_DECRYPT | ATH9K_RXERR_MIC |
- ATH9K_RXERR_KEYMISS;
-
- if (ah->is_monitoring && (sc->rx.rxfilter & FIF_FCSFAIL))
- status_mask |= ATH9K_RXERR_CRC;
-
- if (rx_stats->rs_status & ~status_mask)
- return false;
- }
-
- /*
- * For unicast frames the MIC error bit can have false positives,
- * so all MIC error reports need to be validated in software.
- * False negatives are not common, so skip software verification
- * if the hardware considers the MIC valid.
- */
- if (strip_mic)
- rxs->flag |= RX_FLAG_MMIC_STRIPPED;
- else if (is_mc && mic_error)
- rxs->flag |= RX_FLAG_MMIC_ERROR;
-
- return true;
-}
-
-static int ath9k_process_rate(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs)
-{
- struct ieee80211_supported_band *sband;
- enum ieee80211_band band;
- unsigned int i = 0;
- struct ath_softc __maybe_unused *sc = common->priv;
- struct ath_hw *ah = sc->sc_ah;
-
- band = ah->curchan->chan->band;
- sband = hw->wiphy->bands[band];
-
- if (IS_CHAN_QUARTER_RATE(ah->curchan))
- rxs->flag |= RX_FLAG_5MHZ;
- else if (IS_CHAN_HALF_RATE(ah->curchan))
- rxs->flag |= RX_FLAG_10MHZ;
-
- if (rx_stats->rs_rate & 0x80) {
- /* HT rate */
- rxs->flag |= RX_FLAG_HT;
- rxs->flag |= rx_stats->flag;
- rxs->rate_idx = rx_stats->rs_rate & 0x7f;
- return 0;
- }
-
- for (i = 0; i < sband->n_bitrates; i++) {
- if (sband->bitrates[i].hw_value == rx_stats->rs_rate) {
- rxs->rate_idx = i;
- return 0;
- }
- if (sband->bitrates[i].hw_value_short == rx_stats->rs_rate) {
- rxs->flag |= RX_FLAG_SHORTPRE;
- rxs->rate_idx = i;
- return 0;
- }
- }
-
- /*
- * No valid hardware bitrate found -- we should not get here
- * because hardware has already validated this frame as OK.
- */
- ath_dbg(common, ANY,
- "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
- rx_stats->rs_rate);
- RX_STAT_INC(rx_rate_err);
- return -EINVAL;
-}
-
-static void ath9k_process_rssi(struct ath_common *common,
- struct ieee80211_hw *hw,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs)
-{
- struct ath_softc *sc = hw->priv;
- struct ath_hw *ah = common->ah;
- int last_rssi;
- int rssi = rx_stats->rs_rssi;
- int i, j;
-
- /*
- * RSSI is not available for subframes in an A-MPDU.
- */
- if (rx_stats->rs_moreaggr) {
- rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
- return;
- }
-
- /*
- * Check if the RSSI for the last subframe in an A-MPDU
- * or an unaggregated frame is valid.
- */
- if (rx_stats->rs_rssi == ATH9K_RSSI_BAD) {
- rxs->flag |= RX_FLAG_NO_SIGNAL_VAL;
- return;
- }
-
- for (i = 0, j = 0; i < ARRAY_SIZE(rx_stats->rs_rssi_ctl); i++) {
- s8 rssi;
-
- if (!(ah->rxchainmask & BIT(i)))
- continue;
-
- rssi = rx_stats->rs_rssi_ctl[i];
- if (rssi != ATH9K_RSSI_BAD) {
- rxs->chains |= BIT(j);
- rxs->chain_signal[j] = ah->noise + rssi;
- }
- j++;
- }
-
- /*
- * Update Beacon RSSI, this is used by ANI.
- */
- if (rx_stats->is_mybeacon &&
- ((ah->opmode == NL80211_IFTYPE_STATION) ||
- (ah->opmode == NL80211_IFTYPE_ADHOC))) {
- ATH_RSSI_LPF(sc->last_rssi, rx_stats->rs_rssi);
- last_rssi = sc->last_rssi;
-
- if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
- rssi = ATH_EP_RND(last_rssi, ATH_RSSI_EP_MULTIPLIER);
- if (rssi < 0)
- rssi = 0;
-
- ah->stats.avgbrssi = rssi;
- }
-
- rxs->signal = ah->noise + rx_stats->rs_rssi;
-}
-
static void ath9k_process_tsf(struct ath_rx_status *rs,
struct ieee80211_rx_status *rxs,
u64 tsf)
@@ -1055,7 +857,7 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* everything but the rate is checked here, the rate check is done
* separately to avoid doing two lookups for a rate for each frame.
*/
- if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
+ if (!ath9k_cmn_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error, sc->rx.rxfilter))
return -EINVAL;
if (ath_is_mybeacon(common, hdr)) {
@@ -1069,10 +871,10 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
if (WARN_ON(!ah->curchan))
return -EINVAL;
- if (ath9k_process_rate(common, hw, rx_stats, rx_status))
+ if (ath9k_cmn_process_rate(common, hw, rx_stats, rx_status))
return -EINVAL;
- ath9k_process_rssi(common, hw, rx_stats, rx_status);
+ ath9k_cmn_process_rssi(common, hw, rx_stats, rx_status);
rx_status->band = ah->curchan->chan->band;
rx_status->freq = ah->curchan->chan->center_freq;
@@ -1092,57 +894,6 @@ corrupt:
return -EINVAL;
}
-static void ath9k_rx_skb_postprocess(struct ath_common *common,
- struct sk_buff *skb,
- struct ath_rx_status *rx_stats,
- struct ieee80211_rx_status *rxs,
- bool decrypt_error)
-{
- struct ath_hw *ah = common->ah;
- struct ieee80211_hdr *hdr;
- int hdrlen, padpos, padsize;
- u8 keyix;
- __le16 fc;
-
- /* see if any padding is done by the hw and remove it */
- hdr = (struct ieee80211_hdr *) skb->data;
- hdrlen = ieee80211_get_hdrlen_from_skb(skb);
- fc = hdr->frame_control;
- padpos = ieee80211_hdrlen(fc);
-
- /* The MAC header is padded to have 32-bit boundary if the
- * packet payload is non-zero. The general calculation for
- * padsize would take into account odd header lengths:
- * padsize = (4 - padpos % 4) % 4; However, since only
- * even-length headers are used, padding can only be 0 or 2
- * bytes and we can optimize this a bit. In addition, we must
- * not try to remove padding from short control frames that do
- * not have payload. */
- padsize = padpos & 3;
- if (padsize && skb->len>=padpos+padsize+FCS_LEN) {
- memmove(skb->data + padsize, skb->data, padpos);
- skb_pull(skb, padsize);
- }
-
- keyix = rx_stats->rs_keyix;
-
- if (!(keyix == ATH9K_RXKEYIX_INVALID) && !decrypt_error &&
- ieee80211_has_protected(fc)) {
- rxs->flag |= RX_FLAG_DECRYPTED;
- } else if (ieee80211_has_protected(fc)
- && !decrypt_error && skb->len >= hdrlen + 4) {
- keyix = skb->data[hdrlen + 3] >> 6;
-
- if (test_bit(keyix, common->keymap))
- rxs->flag |= RX_FLAG_DECRYPTED;
- }
- if (ah->sw_mgmt_crypto &&
- (rxs->flag & RX_FLAG_DECRYPTED) &&
- ieee80211_is_mgmt(fc))
- /* Use software decrypt for management frames. */
- rxs->flag &= ~RX_FLAG_DECRYPTED;
-}
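
The padding shortcut in the removed helper above rests on the observation that, for the even 802.11 header lengths that actually occur, (4 - padpos % 4) % 4 equals padpos & 3. A tiny standalone verification over a few representative header lengths:

/* Checks the equivalence assumed by the padding comment above: for even
 * 802.11 header lengths, (4 - len % 4) % 4 == (len & 3).  The lengths
 * listed are representative, not exhaustive.
 */
#include <stdio.h>

int main(void)
{
	int lens[] = { 24, 26, 30, 32 };	/* data, QoS data, 4-addr, 4-addr QoS */
	int i;

	for (i = 0; i < 4; i++) {
		int len = lens[i];
		int general = (4 - len % 4) % 4;
		int shortcut = len & 3;

		printf("len %2d: general %d, shortcut %d%s\n", len, general,
		       shortcut, general == shortcut ? "" : "  MISMATCH");
	}
	return 0;
}
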
-
/*
* Run the LNA combining algorithm only in these cases:
*
@@ -1292,8 +1043,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
skb_pull(skb, ah->caps.rx_status_len);
if (!rs.rs_more)
- ath9k_rx_skb_postprocess(common, hdr_skb, &rs,
- rxs, decrypt_error);
+ ath9k_cmn_rx_skb_postprocess(common, hdr_skb, &rs,
+ rxs, decrypt_error);
if (rs.rs_more) {
RX_STAT_INC(rx_frags);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index f042a18..312314c 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -2569,7 +2569,7 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
sc->beacon.tx_processed = true;
sc->beacon.tx_last = !(ts.ts_status & ATH9K_TXERR_MASK);
- ath9k_csa_is_finished(sc);
+ ath9k_csa_update(sc);
continue;
}
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.c b/drivers/net/wireless/ath/wcn36xx/dxe.c
index ee25786..73f12f1 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.c
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.c
@@ -44,6 +44,14 @@ static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
writel(data, wcn->mmio + addr);
}
+#define wcn36xx_dxe_write_register_x(wcn, reg, reg_data) \
+do { \
+ if (wcn->chip_version == WCN36XX_CHIP_3680) \
+ wcn36xx_dxe_write_register(wcn, reg ## _3680, reg_data); \
+ else \
+ wcn36xx_dxe_write_register(wcn, reg ## _3660, reg_data); \
+} while (0) \
+
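
The new wcn36xx_dxe_write_register_x() macro dispatches on chip version by token-pasting a _3660 or _3680 suffix onto the register name. Below is a standalone illustration of the same pattern; the register offsets match dxe.h, but the chip constants and the write helper are stand-ins, not the driver's API.

/* Standalone illustration of the token-pasting dispatch used by the
 * wcn36xx_dxe_write_register_x() macro above.  CHIP_* and write_register()
 * are made-up stand-ins.
 */
#include <stdio.h>

#define REG_CCU_INT_3660	0x200b10
#define REG_CCU_INT_3680	0x2050dc

#define CHIP_3660	0
#define CHIP_3680	1

static void write_register(int addr, int data)
{
	printf("write 0x%x to register 0x%06x\n", data, addr);
}

#define write_register_x(chip, reg, reg_data)			\
do {								\
	if ((chip) == CHIP_3680)				\
		write_register(reg ## _3680, reg_data);		\
	else							\
		write_register(reg ## _3660, reg_data);		\
} while (0)

int main(void)
{
	write_register_x(CHIP_3680, REG_CCU_INT, 0xA0011);	/* -> 0x2050dc */
	write_register_x(CHIP_3660, REG_CCU_INT, 0xA0011);	/* -> 0x200b10 */
	return 0;
}
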
static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
*data = readl(wcn->mmio + addr);
@@ -680,7 +688,7 @@ int wcn36xx_dxe_init(struct wcn36xx *wcn)
/* Setting interrupt path */
reg_data = WCN36XX_DXE_CCU_INT;
- wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
+ wcn36xx_dxe_write_register_x(wcn, WCN36XX_DXE_REG_CCU_INT, reg_data);
/***************************************/
/* Init descriptors for TX LOW channel */
diff --git a/drivers/net/wireless/ath/wcn36xx/dxe.h b/drivers/net/wireless/ath/wcn36xx/dxe.h
index c88562f..35ee7e9 100644
--- a/drivers/net/wireless/ath/wcn36xx/dxe.h
+++ b/drivers/net/wireless/ath/wcn36xx/dxe.h
@@ -28,11 +28,11 @@ H2H_TEST_RX_TX = DMA2
*/
/* DXE registers */
-#define WCN36XX_DXE_MEM_BASE 0x03000000
#define WCN36XX_DXE_MEM_REG 0x202000
#define WCN36XX_DXE_CCU_INT 0xA0011
-#define WCN36XX_DXE_REG_CCU_INT 0x200b10
+#define WCN36XX_DXE_REG_CCU_INT_3660 0x200b10
+#define WCN36XX_DXE_REG_CCU_INT_3680 0x2050dc
/* TODO This must calculated properly but not hardcoded */
#define WCN36XX_DXE_CTRL_TX_L 0x328a44
diff --git a/drivers/net/wireless/ath/wcn36xx/hal.h b/drivers/net/wireless/ath/wcn36xx/hal.h
index 3c2ef0c..a1f1127 100644
--- a/drivers/net/wireless/ath/wcn36xx/hal.h
+++ b/drivers/net/wireless/ath/wcn36xx/hal.h
@@ -4384,11 +4384,13 @@ enum place_holder_in_cap_bitmap {
MAX_FEATURE_SUPPORTED = 128,
};
+#define WCN36XX_HAL_CAPS_SIZE 4
+
struct wcn36xx_hal_feat_caps_msg {
struct wcn36xx_hal_msg_header header;
- u32 feat_caps[4];
+ u32 feat_caps[WCN36XX_HAL_CAPS_SIZE];
} __packed;
/* status codes to help debug rekey failures */
diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c
index e64a678..4ab5370 100644
--- a/drivers/net/wireless/ath/wcn36xx/main.c
+++ b/drivers/net/wireless/ath/wcn36xx/main.c
@@ -17,6 +17,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
+#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "wcn36xx.h"
@@ -177,6 +178,60 @@ static inline u8 get_sta_index(struct ieee80211_vif *vif,
sta_priv->sta_index;
}
+static const char * const wcn36xx_caps_names[] = {
+ "MCC", /* 0 */
+ "P2P", /* 1 */
+ "DOT11AC", /* 2 */
+ "SLM_SESSIONIZATION", /* 3 */
+ "DOT11AC_OPMODE", /* 4 */
+ "SAP32STA", /* 5 */
+ "TDLS", /* 6 */
+ "P2P_GO_NOA_DECOUPLE_INIT_SCAN",/* 7 */
+ "WLANACTIVE_OFFLOAD", /* 8 */
+ "BEACON_OFFLOAD", /* 9 */
+ "SCAN_OFFLOAD", /* 10 */
+ "ROAM_OFFLOAD", /* 11 */
+ "BCN_MISS_OFFLOAD", /* 12 */
+ "STA_POWERSAVE", /* 13 */
+ "STA_ADVANCED_PWRSAVE", /* 14 */
+ "AP_UAPSD", /* 15 */
+ "AP_DFS", /* 16 */
+ "BLOCKACK", /* 17 */
+ "PHY_ERR", /* 18 */
+ "BCN_FILTER", /* 19 */
+ "RTT", /* 20 */
+ "RATECTRL", /* 21 */
+ "WOW" /* 22 */
+};
+
+static const char *wcn36xx_get_cap_name(enum place_holder_in_cap_bitmap x)
+{
+ if (x >= ARRAY_SIZE(wcn36xx_caps_names))
+ return "UNKNOWN";
+ return wcn36xx_caps_names[x];
+}
+
+static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
+{
+ int i;
+
+ for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
+ if (get_feat_caps(wcn->fw_feat_caps, i))
+ wcn36xx_info("FW Cap %s\n", wcn36xx_get_cap_name(i));
+ }
+}
+
+static void wcn36xx_detect_chip_version(struct wcn36xx *wcn)
+{
+ if (get_feat_caps(wcn->fw_feat_caps, DOT11AC)) {
+ wcn36xx_info("Chip is 3680\n");
+ wcn->chip_version = WCN36XX_CHIP_3680;
+ } else {
+ wcn36xx_info("Chip is 3660\n");
+ wcn->chip_version = WCN36XX_CHIP_3660;
+ }
+}
+
static int wcn36xx_start(struct ieee80211_hw *hw)
{
struct wcn36xx *wcn = hw->priv;
@@ -223,6 +278,16 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
goto out_free_smd_buf;
}
+ if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
+ ret = wcn36xx_smd_feature_caps_exchange(wcn);
+ if (ret)
+ wcn36xx_warn("Exchange feature caps failed\n");
+ else
+ wcn36xx_feat_caps_info(wcn);
+ }
+
+ wcn36xx_detect_chip_version(wcn);
+
/* DMA channel initialization */
ret = wcn36xx_dxe_init(wcn);
if (ret) {
@@ -232,11 +297,6 @@ static int wcn36xx_start(struct ieee80211_hw *hw)
wcn36xx_debugfs_init(wcn);
- if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
- ret = wcn36xx_smd_feature_caps_exchange(wcn);
- if (ret)
- wcn36xx_warn("Exchange feature caps failed\n");
- }
INIT_LIST_HEAD(&wcn->vif_list);
return 0;
@@ -648,6 +708,7 @@ static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
bss_conf->enable_beacon);
if (bss_conf->enable_beacon) {
+ vif_priv->dtim_period = bss_conf->dtim_period;
vif_priv->bss_index = 0xff;
wcn36xx_smd_config_bss(wcn, vif, NULL,
vif->addr, false);
@@ -992,6 +1053,7 @@ static int wcn36xx_remove(struct platform_device *pdev)
struct wcn36xx *wcn = hw->priv;
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
+ release_firmware(wcn->nv);
mutex_destroy(&wcn->hal_mutex);
ieee80211_unregister_hw(hw);
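
The smd.c hunks in this patch move the NV blob from a request/release pair inside wcn36xx_smd_load_nv() to a wcn->nv cache that is requested once and released only here in wcn36xx_remove(); a condensed sketch of that ownership pattern (function names below are illustrative):

/* Sketch: request the firmware once, keep it for the device lifetime. */
static int nv_get_cached(struct wcn36xx *wcn)
{
	if (wcn->nv)
		return 0;			/* already cached */
	return request_firmware(&wcn->nv, WLAN_NV_FILE, wcn->dev);
}

static void nv_put(struct wcn36xx *wcn)
{
	release_firmware(wcn->nv);		/* safe on NULL, called at remove */
	wcn->nv = NULL;
}
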
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index 750626b..7bf0ef8 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -195,9 +195,11 @@ static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
{
int ret = 0;
+ unsigned long start;
wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
init_completion(&wcn->hal_rsp_compl);
+ start = jiffies;
ret = wcn->ctrl_ops->tx(wcn->hal_buf, len);
if (ret) {
wcn36xx_err("HAL TX failed\n");
@@ -205,10 +207,13 @@ static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
}
if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
- wcn36xx_err("Timeout while waiting SMD response\n");
+ wcn36xx_err("Timeout! No SMD response in %dms\n",
+ HAL_MSG_TIMEOUT);
ret = -ETIME;
goto out;
}
+ wcn36xx_dbg(WCN36XX_DBG_SMD, "SMD command completed in %dms",
+ jiffies_to_msecs(jiffies - start));
out:
return ret;
}
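
The timing added above is the usual completion-with-timeout pattern: record jiffies before the transmit, bound the wait with msecs_to_jiffies(), and report jiffies_to_msecs() of the delta on success; a stripped-down sketch (helper name illustrative):

/* Sketch: bounded wait plus elapsed-time reporting, as in the hunk above. */
static int wait_and_time(struct completion *done, unsigned int timeout_ms)
{
	unsigned long start = jiffies;

	if (!wait_for_completion_timeout(done, msecs_to_jiffies(timeout_ms)))
		return -ETIME;			/* 0 return means timeout */

	pr_debug("completed in %ums\n", jiffies_to_msecs(jiffies - start));
	return 0;
}
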
@@ -246,21 +251,22 @@ static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
{
- const struct firmware *nv;
struct nv_data *nv_d;
struct wcn36xx_hal_nv_img_download_req_msg msg_body;
int fw_bytes_left;
int ret;
u16 fm_offset = 0;
- ret = request_firmware(&nv, WLAN_NV_FILE, wcn->dev);
- if (ret) {
- wcn36xx_err("Failed to load nv file %s: %d\n",
- WLAN_NV_FILE, ret);
- goto out_free_nv;
+ if (!wcn->nv) {
+ ret = request_firmware(&wcn->nv, WLAN_NV_FILE, wcn->dev);
+ if (ret) {
+ wcn36xx_err("Failed to load nv file %s: %d\n",
+ WLAN_NV_FILE, ret);
+ goto out;
+ }
}
- nv_d = (struct nv_data *)nv->data;
+ nv_d = (struct nv_data *)wcn->nv->data;
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
@@ -270,7 +276,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
mutex_lock(&wcn->hal_mutex);
do {
- fw_bytes_left = nv->size - fm_offset - 4;
+ fw_bytes_left = wcn->nv->size - fm_offset - 4;
if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
msg_body.last_fragment = 0;
msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
@@ -308,10 +314,7 @@ int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
out_unlock:
mutex_unlock(&wcn->hal_mutex);
-out_free_nv:
- release_firmware(nv);
-
- return ret;
+out: return ret;
}
static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
@@ -899,11 +902,12 @@ static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
sta_priv->sta_index = params->sta_index;
sta_priv->dpu_desc_index = params->dpu_index;
+ sta_priv->ucast_dpu_sign = params->uc_ucast_sig;
wcn36xx_dbg(WCN36XX_DBG_HAL,
- "hal config sta rsp status %d sta_index %d bssid_index %d p2p %d\n",
+ "hal config sta rsp status %d sta_index %d bssid_index %d uc_ucast_sig %d p2p %d\n",
params->status, params->sta_index, params->bssid_index,
- params->p2p);
+ params->uc_ucast_sig, params->p2p);
return 0;
}
@@ -1118,7 +1122,7 @@ static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
priv_vif->sta->bss_dpu_desc_index = params->dpu_desc_index;
}
- priv_vif->ucast_dpu_signature = params->ucast_dpu_signature;
+ priv_vif->self_ucast_dpu_sign = params->ucast_dpu_signature;
return 0;
}
@@ -1637,12 +1641,12 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
- wcn36xx_err("Sending hal_exit_bmps failed\n");
+ wcn36xx_err("Sending hal_keep_alive failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
- wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
+ wcn36xx_err("hal_keep_alive response failed err=%d\n", ret);
goto out;
}
out:
@@ -1682,8 +1686,7 @@ out:
return ret;
}
-static inline void set_feat_caps(u32 *bitmap,
- enum place_holder_in_cap_bitmap cap)
+void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
@@ -1697,8 +1700,7 @@ static inline void set_feat_caps(u32 *bitmap,
bitmap[arr_idx] |= (1 << bit_idx);
}
-static inline int get_feat_caps(u32 *bitmap,
- enum place_holder_in_cap_bitmap cap)
+int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
int ret = 0;
@@ -1714,8 +1716,7 @@ static inline int get_feat_caps(u32 *bitmap,
return ret;
}
-static inline void clear_feat_caps(u32 *bitmap,
- enum place_holder_in_cap_bitmap cap)
+void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap)
{
int arr_idx, bit_idx;
@@ -1731,8 +1732,8 @@ static inline void clear_feat_caps(u32 *bitmap,
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
{
- struct wcn36xx_hal_feat_caps_msg msg_body;
- int ret = 0;
+ struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
+ int ret = 0, i;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
@@ -1746,12 +1747,15 @@ int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
goto out;
}
- ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
- if (ret) {
- wcn36xx_err("hal_feature_caps_exchange response failed err=%d\n",
- ret);
+ if (wcn->hal_rsp_len != sizeof(*rsp)) {
+ wcn36xx_err("Invalid hal_feature_caps_exchange response");
goto out;
}
+
+ rsp = (struct wcn36xx_hal_feat_caps_msg *) wcn->hal_buf;
+
+ for (i = 0; i < WCN36XX_HAL_CAPS_SIZE; i++)
+ wcn->fw_feat_caps[i] = rsp->feat_caps[i];
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.h b/drivers/net/wireless/ath/wcn36xx/smd.h
index e7c3901..008d034 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.h
+++ b/drivers/net/wireless/ath/wcn36xx/smd.h
@@ -24,7 +24,7 @@
#define WCN36XX_HAL_BUF_SIZE 4096
-#define HAL_MSG_TIMEOUT 200
+#define HAL_MSG_TIMEOUT 500
#define WCN36XX_SMSM_WLAN_TX_ENABLE 0x00000400
#define WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY 0x00000200
/* The PNO version info be contained in the rsp msg */
@@ -112,6 +112,9 @@ int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 arg5);
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn);
+void set_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
+int get_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
+void clear_feat_caps(u32 *bitmap, enum place_holder_in_cap_bitmap cap);
int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
diff --git a/drivers/net/wireless/ath/wcn36xx/txrx.c b/drivers/net/wireless/ath/wcn36xx/txrx.c
index b2b60e3..32bb26a 100644
--- a/drivers/net/wireless/ath/wcn36xx/txrx.c
+++ b/drivers/net/wireless/ath/wcn36xx/txrx.c
@@ -57,8 +57,7 @@ int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
- wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x status->vendor_radiotap_len=%x\n",
- status.flag, status.vendor_radiotap_len);
+ wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
@@ -132,6 +131,7 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
struct ieee80211_vif,
drv_priv);
+ bd->dpu_sign = sta_priv->ucast_dpu_sign;
if (vif->type == NL80211_IFTYPE_STATION) {
bd->sta_index = sta_priv->bss_sta_index;
bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
@@ -145,10 +145,9 @@ static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
__vif_priv = get_vif_by_addr(wcn, hdr->addr2);
bd->sta_index = __vif_priv->self_sta_index;
bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
+ bd->dpu_sign = __vif_priv->self_ucast_dpu_sign;
}
- bd->dpu_sign = __vif_priv->ucast_dpu_signature;
-
if (ieee80211_is_nullfunc(hdr->frame_control) ||
(sta_priv && !sta_priv->is_data_encrypted))
bd->dpu_ne = 1;
diff --git a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
index 8fa5cba..f0fb81d 100644
--- a/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
+++ b/drivers/net/wireless/ath/wcn36xx/wcn36xx.h
@@ -125,10 +125,10 @@ struct wcn36xx_vif {
enum wcn36xx_power_state pw_state;
u8 bss_index;
- u8 ucast_dpu_signature;
/* Returned from WCN36XX_HAL_ADD_STA_SELF_RSP */
u8 self_sta_index;
u8 self_dpu_desc_index;
+ u8 self_ucast_dpu_sign;
};
/**
@@ -159,6 +159,7 @@ struct wcn36xx_sta {
u16 tid;
u8 sta_index;
u8 dpu_desc_index;
+ u8 ucast_dpu_sign;
u8 bss_sta_index;
u8 bss_dpu_desc_index;
bool is_data_encrypted;
@@ -171,10 +172,14 @@ struct wcn36xx {
struct device *dev;
struct list_head vif_list;
+ const struct firmware *nv;
+
u8 fw_revision;
u8 fw_version;
u8 fw_minor;
u8 fw_major;
+ u32 fw_feat_caps[WCN36XX_HAL_CAPS_SIZE];
+ u32 chip_version;
/* extra byte for the NULL termination */
u8 crm_version[WCN36XX_HAL_VERSION_LENGTH + 1];
@@ -222,6 +227,9 @@ struct wcn36xx {
};
+#define WCN36XX_CHIP_3660 0
+#define WCN36XX_CHIP_3680 1
+
static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
u8 major,
u8 minor,
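
The hunk above is truncated mid-signature by the diff context; the inline helper it belongs to most plausibly compares the four stored firmware version fields, roughly as follows (reconstruction, may not match the file byte-for-byte):

static inline bool wcn36xx_is_fw_version(struct wcn36xx *wcn,
					 u8 major, u8 minor,
					 u8 version, u8 revision)
{
	return wcn->fw_major == major &&
	       wcn->fw_minor == minor &&
	       wcn->fw_version == version &&
	       wcn->fw_revision == revision;
}
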
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index eeceab3..e1c8cc4 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -41,30 +41,28 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
switch (use_msi) {
case 3:
case 1:
+ wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
+ break;
case 0:
+ wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
break;
default:
- wil_err(wil, "Invalid use_msi=%d, default to 1\n",
- use_msi);
+ wil_err(wil, "Invalid use_msi=%d, default to 1\n", use_msi);
use_msi = 1;
}
- wil->n_msi = use_msi;
- if (wil->n_msi) {
- wil_dbg_misc(wil, "Setup %d MSI interrupts\n", use_msi);
- rc = pci_enable_msi_block(pdev, wil->n_msi);
- if (rc && (wil->n_msi == 3)) {
- wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
- wil->n_msi = 1;
- rc = pci_enable_msi_block(pdev, wil->n_msi);
- }
- if (rc) {
- wil_err(wil, "pci_enable_msi failed, use INTx\n");
- wil->n_msi = 0;
- }
- } else {
- wil_dbg_misc(wil, "MSI interrupts disabled, use INTx\n");
+
+ if (use_msi == 3 && pci_enable_msi_range(pdev, 3, 3) < 0) {
+ wil_err(wil, "3 MSI mode failed, try 1 MSI\n");
+ use_msi = 1;
+ }
+
+ if (use_msi == 1 && pci_enable_msi(pdev)) {
+ wil_err(wil, "pci_enable_msi failed, use INTx\n");
+ use_msi = 0;
}
+ wil->n_msi = use_msi;
+
rc = wil6210_init_irq(wil, pdev->irq);
if (rc)
goto stop_master;
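
The rework above swaps the removed pci_enable_msi_block() for pci_enable_msi_range(), which takes a (min, max) vector count and returns the number of vectors allocated or a negative error; a minimal sketch of the same 3-MSI, then 1-MSI, then INTx fallback (function name illustrative):

/* Sketch of the MSI fallback chain used in wil_if_pcie_enable(). */
static int setup_msi(struct pci_dev *pdev, int want)
{
	if (want == 3 && pci_enable_msi_range(pdev, 3, 3) < 0)
		want = 1;			/* exactly 3 vectors unavailable */

	if (want == 1 && pci_enable_msi(pdev))
		want = 0;			/* fall back to legacy INTx */

	return want;				/* MSI vectors now in use */
}
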
diff --git a/drivers/net/wireless/b43/main.h b/drivers/net/wireless/b43/main.h
index abac25e..f476fc3 100644
--- a/drivers/net/wireless/b43/main.h
+++ b/drivers/net/wireless/b43/main.h
@@ -58,41 +58,6 @@ enum b43_verbosity {
#endif
};
-
-/* Lightweight function to convert a frequency (in Mhz) to a channel number. */
-static inline u8 b43_freq_to_channel_5ghz(int freq)
-{
- return ((freq - 5000) / 5);
-}
-static inline u8 b43_freq_to_channel_2ghz(int freq)
-{
- u8 channel;
-
- if (freq == 2484)
- channel = 14;
- else
- channel = (freq - 2407) / 5;
-
- return channel;
-}
-
-/* Lightweight function to convert a channel number to a frequency (in Mhz). */
-static inline int b43_channel_to_freq_5ghz(u8 channel)
-{
- return (5000 + (5 * channel));
-}
-static inline int b43_channel_to_freq_2ghz(u8 channel)
-{
- int freq;
-
- if (channel == 14)
- freq = 2484;
- else
- freq = 2407 + (5 * channel);
-
- return freq;
-}
-
static inline int b43_is_cck_rate(int rate)
{
return (rate == B43_CCK_RATE_1MB ||
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 50e5ddb..218a0f3 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -806,7 +806,8 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
B43_WARN_ON(1);
/* FIXME: We don't really know which value the "chanid" contains.
* So the following assignment might be wrong. */
- status.freq = b43_channel_to_freq_5ghz(chanid);
+ status.freq =
+ ieee80211_channel_to_frequency(chanid, status.band);
break;
case B43_PHYTYPE_G:
status.band = IEEE80211_BAND_2GHZ;
@@ -819,13 +820,12 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
case B43_PHYTYPE_HT:
/* chanid is the SHM channel cookie. Which is the plain
* channel number in b43. */
- if (chanstat & B43_RX_CHAN_5GHZ) {
+ if (chanstat & B43_RX_CHAN_5GHZ)
status.band = IEEE80211_BAND_5GHZ;
- status.freq = b43_channel_to_freq_5ghz(chanid);
- } else {
+ else
status.band = IEEE80211_BAND_2GHZ;
- status.freq = b43_channel_to_freq_2ghz(chanid);
- }
+ status.freq =
+ ieee80211_channel_to_frequency(chanid, status.band);
break;
default:
B43_WARN_ON(1);
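
The b43 hunks drop the driver's private channel/frequency converters in favor of mac80211's ieee80211_channel_to_frequency(), which needs the band to disambiguate channel numbers shared between bands; a small usage sketch (wrapper name illustrative, band enum as used in this kernel series):

/* Sketch: band-aware conversion replacing the removed b43 helpers. */
static int chanid_to_freq(u8 chanid, bool is_5ghz)
{
	enum ieee80211_band band = is_5ghz ? IEEE80211_BAND_5GHZ
					   : IEEE80211_BAND_2GHZ;

	/* e.g. channel 36 on 5 GHz -> 5180, channel 14 on 2.4 GHz -> 2484 */
	return ieee80211_channel_to_frequency(chanid, band);
}
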
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
index 57cddee..1d2ceac 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/Makefile
+++ b/drivers/net/wireless/brcm80211/brcmfmac/Makefile
@@ -24,6 +24,7 @@ ccflags-y += -D__CHECK_ENDIAN__
obj-$(CONFIG_BRCMFMAC) += brcmfmac.o
brcmfmac-objs += \
wl_cfg80211.o \
+ chip.o \
fwil.o \
fweh.o \
fwsignal.o \
@@ -36,8 +37,7 @@ brcmfmac-objs += \
btcoex.o
brcmfmac-$(CONFIG_BRCMFMAC_SDIO) += \
dhd_sdio.o \
- bcmsdh.o \
- sdio_chip.o
+ bcmsdh.o
brcmfmac-$(CONFIG_BRCMFMAC_USB) += \
usb.o
brcmfmac-$(CONFIG_BRCMDBG) += \
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index fa35b23..07e7d25 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -43,7 +43,6 @@
#include "dhd_bus.h"
#include "dhd_dbg.h"
#include "sdio_host.h"
-#include "sdio_chip.h"
#define SDIOH_API_ACCESS_RETRY_LIMIT 2
@@ -827,7 +826,7 @@ brcmf_sdiod_ramrw(struct brcmf_sdio_dev *sdiodev, bool write, u32 address,
}
if (!write)
memcpy(data, pkt->data, dsize);
- skb_trim(pkt, dsize);
+ skb_trim(pkt, 0);
/* Adjust for next transfer (if any) */
size -= dsize;
@@ -1115,11 +1114,12 @@ static struct sdio_driver brcmf_sdmmc_driver = {
.remove = brcmf_ops_sdio_remove,
.name = BRCMFMAC_SDIO_PDATA_NAME,
.id_table = brcmf_sdmmc_ids,
-#ifdef CONFIG_PM_SLEEP
.drv = {
+ .owner = THIS_MODULE,
+#ifdef CONFIG_PM_SLEEP
.pm = &brcmf_sdio_pm_ops,
- },
#endif /* CONFIG_PM_SLEEP */
+ },
};
static int brcmf_sdio_pd_probe(struct platform_device *pdev)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
new file mode 100644
index 0000000..a07b95e
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.c
@@ -0,0 +1,1029 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/ssb/ssb_regs.h>
+#include <linux/bcma/bcma.h>
+#include <linux/bcma/bcma_regs.h>
+
+#include <defs.h>
+#include <soc.h>
+#include <brcm_hw_ids.h>
+#include <brcmu_utils.h>
+#include <chipcommon.h>
+#include "dhd_dbg.h"
+#include "chip.h"
+
+/* SOC Interconnect types (aka chip types) */
+#define SOCI_SB 0
+#define SOCI_AI 1
+
+/* PL-368 DMP definitions */
+#define DMP_DESC_TYPE_MSK 0x0000000F
+#define DMP_DESC_EMPTY 0x00000000
+#define DMP_DESC_VALID 0x00000001
+#define DMP_DESC_COMPONENT 0x00000001
+#define DMP_DESC_MASTER_PORT 0x00000003
+#define DMP_DESC_ADDRESS 0x00000005
+#define DMP_DESC_ADDRSIZE_GT32 0x00000008
+#define DMP_DESC_EOT 0x0000000F
+
+#define DMP_COMP_DESIGNER 0xFFF00000
+#define DMP_COMP_DESIGNER_S 20
+#define DMP_COMP_PARTNUM 0x000FFF00
+#define DMP_COMP_PARTNUM_S 8
+#define DMP_COMP_CLASS 0x000000F0
+#define DMP_COMP_CLASS_S 4
+#define DMP_COMP_REVISION 0xFF000000
+#define DMP_COMP_REVISION_S 24
+#define DMP_COMP_NUM_SWRAP 0x00F80000
+#define DMP_COMP_NUM_SWRAP_S 19
+#define DMP_COMP_NUM_MWRAP 0x0007C000
+#define DMP_COMP_NUM_MWRAP_S 14
+#define DMP_COMP_NUM_SPORT 0x00003E00
+#define DMP_COMP_NUM_SPORT_S 9
+#define DMP_COMP_NUM_MPORT 0x000001F0
+#define DMP_COMP_NUM_MPORT_S 4
+
+#define DMP_MASTER_PORT_UID 0x0000FF00
+#define DMP_MASTER_PORT_UID_S 8
+#define DMP_MASTER_PORT_NUM 0x000000F0
+#define DMP_MASTER_PORT_NUM_S 4
+
+#define DMP_SLAVE_ADDR_BASE 0xFFFFF000
+#define DMP_SLAVE_ADDR_BASE_S 12
+#define DMP_SLAVE_PORT_NUM 0x00000F00
+#define DMP_SLAVE_PORT_NUM_S 8
+#define DMP_SLAVE_TYPE 0x000000C0
+#define DMP_SLAVE_TYPE_S 6
+#define DMP_SLAVE_TYPE_SLAVE 0
+#define DMP_SLAVE_TYPE_BRIDGE 1
+#define DMP_SLAVE_TYPE_SWRAP 2
+#define DMP_SLAVE_TYPE_MWRAP 3
+#define DMP_SLAVE_SIZE_TYPE 0x00000030
+#define DMP_SLAVE_SIZE_TYPE_S 4
+#define DMP_SLAVE_SIZE_4K 0
+#define DMP_SLAVE_SIZE_8K 1
+#define DMP_SLAVE_SIZE_16K 2
+#define DMP_SLAVE_SIZE_DESC 3
+
+/* EROM CompIdentB */
+#define CIB_REV_MASK 0xff000000
+#define CIB_REV_SHIFT 24
+
+/* ARM CR4 core specific control flag bits */
+#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
+
+/* D11 core specific control flag bits */
+#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
+#define D11_BCMA_IOCTL_PHYRESET 0x0008
+
+/* chip core base & ramsize */
+/* bcm4329 */
+/* SDIO device core, ID 0x829 */
+#define BCM4329_CORE_BUS_BASE 0x18011000
+/* internal memory core, ID 0x80e */
+#define BCM4329_CORE_SOCRAM_BASE 0x18003000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM4329_CORE_ARM_BASE 0x18002000
+#define BCM4329_RAMSIZE 0x48000
+
+/* bcm43143 */
+/* SDIO device core */
+#define BCM43143_CORE_BUS_BASE 0x18002000
+/* internal memory core */
+#define BCM43143_CORE_SOCRAM_BASE 0x18004000
+/* ARM Cortex M3 core, ID 0x82a */
+#define BCM43143_CORE_ARM_BASE 0x18003000
+#define BCM43143_RAMSIZE 0x70000
+
+#define CORE_SB(base, field) \
+ (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
+#define SBCOREREV(sbidh) \
+ ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
+ ((sbidh) & SSB_IDHIGH_RCLO))
+
+struct sbconfig {
+ u32 PAD[2];
+ u32 sbipsflag; /* initiator port ocp slave flag */
+ u32 PAD[3];
+ u32 sbtpsflag; /* target port ocp slave flag */
+ u32 PAD[11];
+ u32 sbtmerrloga; /* (sonics >= 2.3) */
+ u32 PAD;
+ u32 sbtmerrlog; /* (sonics >= 2.3) */
+ u32 PAD[3];
+ u32 sbadmatch3; /* address match3 */
+ u32 PAD;
+ u32 sbadmatch2; /* address match2 */
+ u32 PAD;
+ u32 sbadmatch1; /* address match1 */
+ u32 PAD[7];
+ u32 sbimstate; /* initiator agent state */
+ u32 sbintvec; /* interrupt mask */
+ u32 sbtmstatelow; /* target state */
+ u32 sbtmstatehigh; /* target state */
+ u32 sbbwa0; /* bandwidth allocation table0 */
+ u32 PAD;
+ u32 sbimconfiglow; /* initiator configuration */
+ u32 sbimconfighigh; /* initiator configuration */
+ u32 sbadmatch0; /* address match0 */
+ u32 PAD;
+ u32 sbtmconfiglow; /* target configuration */
+ u32 sbtmconfighigh; /* target configuration */
+ u32 sbbconfig; /* broadcast configuration */
+ u32 PAD;
+ u32 sbbstate; /* broadcast state */
+ u32 PAD[3];
+ u32 sbactcnfg; /* activate configuration */
+ u32 PAD[3];
+ u32 sbflagst; /* current sbflags */
+ u32 PAD[3];
+ u32 sbidlow; /* identification */
+ u32 sbidhigh; /* identification */
+};
+
+struct brcmf_core_priv {
+ struct brcmf_core pub;
+ u32 wrapbase;
+ struct list_head list;
+ struct brcmf_chip_priv *chip;
+};
+
+struct brcmf_chip_priv {
+ struct brcmf_chip pub;
+ const struct brcmf_buscore_ops *ops;
+ void *ctx;
+ /* assured first core is chipcommon, second core is buscore */
+ struct list_head cores;
+ u16 num_cores;
+
+ bool (*iscoreup)(struct brcmf_core_priv *core);
+ void (*coredisable)(struct brcmf_core_priv *core, u32 prereset,
+ u32 reset);
+ void (*resetcore)(struct brcmf_core_priv *core, u32 prereset, u32 reset,
+ u32 postreset);
+};
+
+static void brcmf_chip_sb_corerev(struct brcmf_chip_priv *ci,
+ struct brcmf_core *core)
+{
+ u32 regdata;
+
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(core->base, sbidhigh));
+ core->rev = SBCOREREV(regdata);
+}
+
+static bool brcmf_chip_sb_iscoreup(struct brcmf_core_priv *core)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+ u32 address;
+
+ ci = core->chip;
+ address = CORE_SB(core->pub.base, sbtmstatelow);
+ regdata = ci->ops->read32(ci->ctx, address);
+ regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
+ SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
+ return SSB_TMSLOW_CLOCK == regdata;
+}
+
+static bool brcmf_chip_ai_iscoreup(struct brcmf_core_priv *core)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+ bool ret;
+
+ ci = core->chip;
+ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+ ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
+
+ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
+ ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
+
+ return ret;
+}
+
+static void brcmf_chip_sb_coredisable(struct brcmf_core_priv *core,
+ u32 prereset, u32 reset)
+{
+ struct brcmf_chip_priv *ci;
+ u32 val, base;
+
+ ci = core->chip;
+ base = core->pub.base;
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ if (val & SSB_TMSLOW_RESET)
+ return;
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ if ((val & SSB_TMSLOW_CLOCK) != 0) {
+ /*
+ * set target reject and spin until busy is clear
+ * (preserve core-specific bits)
+ */
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ val | SSB_TMSLOW_REJECT);
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+ SPINWAIT((ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh))
+ & SSB_TMSHIGH_BUSY), 100000);
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
+ if (val & SSB_TMSHIGH_BUSY)
+ brcmf_err("core state still busy\n");
+
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
+ if (val & SSB_IDLOW_INITIATOR) {
+ val = ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate));
+ val |= SSB_IMSTATE_REJECT;
+ ci->ops->write32(ci->ctx,
+ CORE_SB(base, sbimstate), val);
+ val = ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate));
+ udelay(1);
+ SPINWAIT((ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate)) &
+ SSB_IMSTATE_BUSY), 100000);
+ }
+
+ /* set reset and reject while enabling the clocks */
+ val = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow), val);
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(10);
+
+ /* clear the initiator reject bit */
+ val = ci->ops->read32(ci->ctx, CORE_SB(base, sbidlow));
+ if (val & SSB_IDLOW_INITIATOR) {
+ val = ci->ops->read32(ci->ctx,
+ CORE_SB(base, sbimstate));
+ val &= ~SSB_IMSTATE_REJECT;
+ ci->ops->write32(ci->ctx,
+ CORE_SB(base, sbimstate), val);
+ }
+ }
+
+ /* leave reset and reject asserted */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET));
+ udelay(1);
+}
+
+static void brcmf_chip_ai_coredisable(struct brcmf_core_priv *core,
+ u32 prereset, u32 reset)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+
+ ci = core->chip;
+
+ /* if core is already in reset, just return */
+ regdata = ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL);
+ if ((regdata & BCMA_RESET_CTL_RESET) != 0)
+ return;
+
+ /* configure reset */
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+ prereset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+
+ /* put in reset */
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL,
+ BCMA_RESET_CTL_RESET);
+ usleep_range(10, 20);
+
+ /* wait till reset is 1 */
+ SPINWAIT(ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) !=
+ BCMA_RESET_CTL_RESET, 300);
+
+ /* in-reset configure */
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+ reset | BCMA_IOCTL_FGC | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+}
+
+static void brcmf_chip_sb_resetcore(struct brcmf_core_priv *core, u32 prereset,
+ u32 reset, u32 postreset)
+{
+ struct brcmf_chip_priv *ci;
+ u32 regdata;
+ u32 base;
+
+ ci = core->chip;
+ base = core->pub.base;
+ /*
+ * Must do the disable sequence first to work for
+ * arbitrary current core state.
+ */
+ brcmf_chip_sb_coredisable(core, 0, 0);
+
+ /*
+ * Now do the initialization sequence.
+ * set reset while enabling the clock and
+ * forcing them on throughout the core
+ */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
+ SSB_TMSLOW_RESET);
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+
+ /* clear any serror */
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatehigh));
+ if (regdata & SSB_TMSHIGH_SERR)
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatehigh), 0);
+
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbimstate));
+ if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO)) {
+ regdata &= ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO);
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbimstate), regdata);
+ }
+
+ /* clear reset and allow it to propagate throughout the core */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK);
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+
+ /* leave clock enabled */
+ ci->ops->write32(ci->ctx, CORE_SB(base, sbtmstatelow),
+ SSB_TMSLOW_CLOCK);
+ regdata = ci->ops->read32(ci->ctx, CORE_SB(base, sbtmstatelow));
+ udelay(1);
+}
+
+static void brcmf_chip_ai_resetcore(struct brcmf_core_priv *core, u32 prereset,
+ u32 reset, u32 postreset)
+{
+ struct brcmf_chip_priv *ci;
+ int count;
+
+ ci = core->chip;
+
+ /* must disable first to work for arbitrary current core state */
+ brcmf_chip_ai_coredisable(core, prereset, reset);
+
+ count = 0;
+ while (ci->ops->read32(ci->ctx, core->wrapbase + BCMA_RESET_CTL) &
+ BCMA_RESET_CTL_RESET) {
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_RESET_CTL, 0);
+ count++;
+ if (count > 50)
+ break;
+ usleep_range(40, 60);
+ }
+
+ ci->ops->write32(ci->ctx, core->wrapbase + BCMA_IOCTL,
+ postreset | BCMA_IOCTL_CLK);
+ ci->ops->read32(ci->ctx, core->wrapbase + BCMA_IOCTL);
+}
+
+static char *brcmf_chip_name(uint chipid, char *buf, uint len)
+{
+ const char *fmt;
+
+ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
+ snprintf(buf, len, fmt, chipid);
+ return buf;
+}
+
+static struct brcmf_core *brcmf_chip_add_core(struct brcmf_chip_priv *ci,
+ u16 coreid, u32 base,
+ u32 wrapbase)
+{
+ struct brcmf_core_priv *core;
+
+ core = kzalloc(sizeof(*core), GFP_KERNEL);
+ if (!core)
+ return ERR_PTR(-ENOMEM);
+
+ core->pub.id = coreid;
+ core->pub.base = base;
+ core->chip = ci;
+ core->wrapbase = wrapbase;
+
+ list_add_tail(&core->list, &ci->cores);
+ return &core->pub;
+}
+
+#ifdef DEBUG
+/* safety check for chipinfo */
+static int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+{
+ struct brcmf_core_priv *core;
+ bool need_socram = false;
+ bool has_socram = false;
+ int idx = 1;
+
+ list_for_each_entry(core, &ci->cores, list) {
+ brcmf_dbg(INFO, " [%-2d] core 0x%x:%-2d base 0x%08x wrap 0x%08x\n",
+ idx++, core->pub.id, core->pub.rev, core->pub.base,
+ core->wrapbase);
+
+ switch (core->pub.id) {
+ case BCMA_CORE_ARM_CM3:
+ need_socram = true;
+ break;
+ case BCMA_CORE_INTERNAL_MEM:
+ has_socram = true;
+ break;
+ case BCMA_CORE_ARM_CR4:
+ if (ci->pub.rambase == 0) {
+ brcmf_err("RAM base not provided with ARM CR4 core\n");
+ return -ENOMEM;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+
+ /* check RAM core presence for ARM CM3 core */
+ if (need_socram && !has_socram) {
+ brcmf_err("RAM core not provided with ARM CM3 core\n");
+ return -ENODEV;
+ }
+ return 0;
+}
+#else /* DEBUG */
+static inline int brcmf_chip_cores_check(struct brcmf_chip_priv *ci)
+{
+ return 0;
+}
+#endif
+
+static void brcmf_chip_get_raminfo(struct brcmf_chip_priv *ci)
+{
+ switch (ci->pub.chip) {
+ case BCM4329_CHIP_ID:
+ ci->pub.ramsize = BCM4329_RAMSIZE;
+ break;
+ case BCM43143_CHIP_ID:
+ ci->pub.ramsize = BCM43143_RAMSIZE;
+ break;
+ case BCM43241_CHIP_ID:
+ ci->pub.ramsize = 0x90000;
+ break;
+ case BCM4330_CHIP_ID:
+ ci->pub.ramsize = 0x48000;
+ break;
+ case BCM4334_CHIP_ID:
+ ci->pub.ramsize = 0x80000;
+ break;
+ case BCM4335_CHIP_ID:
+ ci->pub.ramsize = 0xc0000;
+ ci->pub.rambase = 0x180000;
+ break;
+ case BCM43362_CHIP_ID:
+ ci->pub.ramsize = 0x3c000;
+ break;
+ case BCM4339_CHIP_ID:
+ ci->pub.ramsize = 0xc0000;
+ ci->pub.rambase = 0x180000;
+ break;
+ default:
+ brcmf_err("unknown chip: %s\n", ci->pub.name);
+ break;
+ }
+}
+
+static u32 brcmf_chip_dmp_get_desc(struct brcmf_chip_priv *ci, u32 *eromaddr,
+ u8 *type)
+{
+ u32 val;
+
+ /* read next descriptor */
+ val = ci->ops->read32(ci->ctx, *eromaddr);
+ *eromaddr += 4;
+
+ if (!type)
+ return val;
+
+ /* determine descriptor type */
+ *type = (val & DMP_DESC_TYPE_MSK);
+ if ((*type & ~DMP_DESC_ADDRSIZE_GT32) == DMP_DESC_ADDRESS)
+ *type = DMP_DESC_ADDRESS;
+
+ return val;
+}
+
+static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr,
+ u32 *regbase, u32 *wrapbase)
+{
+ u8 desc;
+ u32 val;
+ u8 mpnum = 0;
+ u8 stype, sztype, wraptype;
+
+ *regbase = 0;
+ *wrapbase = 0;
+
+ val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
+ if (desc == DMP_DESC_MASTER_PORT) {
+ mpnum = (val & DMP_MASTER_PORT_NUM) >> DMP_MASTER_PORT_NUM_S;
+ wraptype = DMP_SLAVE_TYPE_MWRAP;
+ } else if (desc == DMP_DESC_ADDRESS) {
+ /* revert erom address */
+ *eromaddr -= 4;
+ wraptype = DMP_SLAVE_TYPE_SWRAP;
+ } else {
+ *eromaddr -= 4;
+ return -EILSEQ;
+ }
+
+ do {
+ /* locate address descriptor */
+ do {
+ val = brcmf_chip_dmp_get_desc(ci, eromaddr, &desc);
+ /* unexpected table end */
+ if (desc == DMP_DESC_EOT) {
+ *eromaddr -= 4;
+ return -EFAULT;
+ }
+ } while (desc != DMP_DESC_ADDRESS);
+
+ /* skip upper 32-bit address descriptor */
+ if (val & DMP_DESC_ADDRSIZE_GT32)
+ brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+
+ sztype = (val & DMP_SLAVE_SIZE_TYPE) >> DMP_SLAVE_SIZE_TYPE_S;
+
+ /* next size descriptor can be skipped */
+ if (sztype == DMP_SLAVE_SIZE_DESC) {
+ val = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+ /* skip upper size descriptor if present */
+ if (val & DMP_DESC_ADDRSIZE_GT32)
+ brcmf_chip_dmp_get_desc(ci, eromaddr, NULL);
+ }
+
+ /* only look for 4K register regions */
+ if (sztype != DMP_SLAVE_SIZE_4K)
+ continue;
+
+ stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S;
+
+ /* only regular slave and wrapper */
+ if (*regbase == 0 && stype == DMP_SLAVE_TYPE_SLAVE)
+ *regbase = val & DMP_SLAVE_ADDR_BASE;
+ if (*wrapbase == 0 && stype == wraptype)
+ *wrapbase = val & DMP_SLAVE_ADDR_BASE;
+ } while (*regbase == 0 || *wrapbase == 0);
+
+ return 0;
+}
+
+static
+int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci)
+{
+ struct brcmf_core *core;
+ u32 eromaddr;
+ u8 desc_type = 0;
+ u32 val;
+ u16 id;
+ u8 nmp, nsp, nmw, nsw, rev;
+ u32 base, wrap;
+ int err;
+
+ eromaddr = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, eromptr));
+
+ while (desc_type != DMP_DESC_EOT) {
+ val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
+ if (!(val & DMP_DESC_VALID))
+ continue;
+
+ if (desc_type == DMP_DESC_EMPTY)
+ continue;
+
+ /* need a component descriptor */
+ if (desc_type != DMP_DESC_COMPONENT)
+ continue;
+
+ id = (val & DMP_COMP_PARTNUM) >> DMP_COMP_PARTNUM_S;
+
+ /* next descriptor must be component as well */
+ val = brcmf_chip_dmp_get_desc(ci, &eromaddr, &desc_type);
+ if (WARN_ON((val & DMP_DESC_TYPE_MSK) != DMP_DESC_COMPONENT))
+ return -EFAULT;
+
+ /* only look at cores with master port(s) */
+ nmp = (val & DMP_COMP_NUM_MPORT) >> DMP_COMP_NUM_MPORT_S;
+ nsp = (val & DMP_COMP_NUM_SPORT) >> DMP_COMP_NUM_SPORT_S;
+ nmw = (val & DMP_COMP_NUM_MWRAP) >> DMP_COMP_NUM_MWRAP_S;
+ nsw = (val & DMP_COMP_NUM_SWRAP) >> DMP_COMP_NUM_SWRAP_S;
+ rev = (val & DMP_COMP_REVISION) >> DMP_COMP_REVISION_S;
+
+ /* need core with ports */
+ if (nmw + nsw == 0)
+ continue;
+
+ /* try to obtain register address info */
+ err = brcmf_chip_dmp_get_regaddr(ci, &eromaddr, &base, &wrap);
+ if (err)
+ continue;
+
+ /* finally a core to be added */
+ core = brcmf_chip_add_core(ci, id, base, wrap);
+ if (IS_ERR(core))
+ return PTR_ERR(core);
+
+ core->rev = rev;
+ }
+
+ return 0;
+}
+
+static int brcmf_chip_recognition(struct brcmf_chip_priv *ci)
+{
+ struct brcmf_core *core;
+ u32 regdata;
+ u32 socitype;
+
+ /* Get CC core rev
+	 * Chipid is assumed to be at offset 0 from SI_ENUM_BASE
+ * For different chiptypes or old sdio hosts w/o chipcommon,
+ * other ways of recognition should be added here.
+ */
+ regdata = ci->ops->read32(ci->ctx, CORE_CC_REG(SI_ENUM_BASE, chipid));
+ ci->pub.chip = regdata & CID_ID_MASK;
+ ci->pub.chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
+ socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
+
+ brcmf_chip_name(ci->pub.chip, ci->pub.name, sizeof(ci->pub.name));
+ brcmf_dbg(INFO, "found %s chip: BCM%s, rev=%d\n",
+ socitype == SOCI_SB ? "SB" : "AXI", ci->pub.name,
+ ci->pub.chiprev);
+
+ if (socitype == SOCI_SB) {
+ if (ci->pub.chip != BCM4329_CHIP_ID) {
+ brcmf_err("SB chip is not supported\n");
+ return -ENODEV;
+ }
+ ci->iscoreup = brcmf_chip_sb_iscoreup;
+ ci->coredisable = brcmf_chip_sb_coredisable;
+ ci->resetcore = brcmf_chip_sb_resetcore;
+
+ core = brcmf_chip_add_core(ci, BCMA_CORE_CHIPCOMMON,
+ SI_ENUM_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ core = brcmf_chip_add_core(ci, BCMA_CORE_SDIO_DEV,
+ BCM4329_CORE_BUS_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ core = brcmf_chip_add_core(ci, BCMA_CORE_INTERNAL_MEM,
+ BCM4329_CORE_SOCRAM_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ core = brcmf_chip_add_core(ci, BCMA_CORE_ARM_CM3,
+ BCM4329_CORE_ARM_BASE, 0);
+ brcmf_chip_sb_corerev(ci, core);
+
+ core = brcmf_chip_add_core(ci, BCMA_CORE_80211, 0x18001000, 0);
+ brcmf_chip_sb_corerev(ci, core);
+ } else if (socitype == SOCI_AI) {
+ ci->iscoreup = brcmf_chip_ai_iscoreup;
+ ci->coredisable = brcmf_chip_ai_coredisable;
+ ci->resetcore = brcmf_chip_ai_resetcore;
+
+ brcmf_chip_dmp_erom_scan(ci);
+ } else {
+ brcmf_err("chip backplane type %u is not supported\n",
+ socitype);
+ return -ENODEV;
+ }
+
+ brcmf_chip_get_raminfo(ci);
+
+ return brcmf_chip_cores_check(ci);
+}
+
+static void brcmf_chip_disable_arm(struct brcmf_chip_priv *chip, u16 id)
+{
+ struct brcmf_core *core;
+ struct brcmf_core_priv *cr4;
+ u32 val;
+
+ core = brcmf_chip_get_core(&chip->pub, id);
+ if (!core)
+ return;
+
+ switch (id) {
+ case BCMA_CORE_ARM_CM3:
+ brcmf_chip_coredisable(core, 0, 0);
+ break;
+ case BCMA_CORE_ARM_CR4:
+ cr4 = container_of(core, struct brcmf_core_priv, pub);
+
+ /* clear all IOCTL bits except HALT bit */
+ val = chip->ops->read32(chip->ctx, cr4->wrapbase + BCMA_IOCTL);
+ val &= ARMCR4_BCMA_IOCTL_CPUHALT;
+ brcmf_chip_resetcore(core, val, ARMCR4_BCMA_IOCTL_CPUHALT,
+ ARMCR4_BCMA_IOCTL_CPUHALT);
+ break;
+ default:
+ brcmf_err("unknown id: %u\n", id);
+ break;
+ }
+}
+
+static int brcmf_chip_setup(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_chip *pub;
+ struct brcmf_core_priv *cc;
+ u32 base;
+ u32 val;
+ int ret = 0;
+
+ pub = &chip->pub;
+ cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
+ base = cc->pub.base;
+
+	/* get chipcommon capabilities */
+ pub->cc_caps = chip->ops->read32(chip->ctx,
+ CORE_CC_REG(base, capabilities));
+
+ /* get pmu caps & rev */
+ if (pub->cc_caps & CC_CAP_PMU) {
+ val = chip->ops->read32(chip->ctx,
+ CORE_CC_REG(base, pmucapabilities));
+ pub->pmurev = val & PCAP_REV_MASK;
+ pub->pmucaps = val;
+ }
+
+ brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, pmucaps=0x%x\n",
+ cc->pub.rev, pub->pmurev, pub->pmucaps);
+
+ /* execute bus core specific setup */
+ if (chip->ops->setup)
+ ret = chip->ops->setup(chip->ctx, pub);
+
+ /*
+	 * Make sure any on-chip ARM is off (in case strapping is wrong,
+	 * or downloaded code was already running).
+ */
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
+ return ret;
+}
+
+struct brcmf_chip *brcmf_chip_attach(void *ctx,
+ const struct brcmf_buscore_ops *ops)
+{
+ struct brcmf_chip_priv *chip;
+ int err = 0;
+
+ if (WARN_ON(!ops->read32))
+ err = -EINVAL;
+ if (WARN_ON(!ops->write32))
+ err = -EINVAL;
+ if (WARN_ON(!ops->prepare))
+ err = -EINVAL;
+ if (WARN_ON(!ops->exit_dl))
+ err = -EINVAL;
+ if (err < 0)
+ return ERR_PTR(-EINVAL);
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return ERR_PTR(-ENOMEM);
+
+ INIT_LIST_HEAD(&chip->cores);
+ chip->num_cores = 0;
+ chip->ops = ops;
+ chip->ctx = ctx;
+
+ err = ops->prepare(ctx);
+ if (err < 0)
+ goto fail;
+
+ err = brcmf_chip_recognition(chip);
+ if (err < 0)
+ goto fail;
+
+ err = brcmf_chip_setup(chip);
+ if (err < 0)
+ goto fail;
+
+ return &chip->pub;
+
+fail:
+ brcmf_chip_detach(&chip->pub);
+ return ERR_PTR(err);
+}
+
+void brcmf_chip_detach(struct brcmf_chip *pub)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *core;
+ struct brcmf_core_priv *tmp;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ list_for_each_entry_safe(core, tmp, &chip->cores, list) {
+ list_del(&core->list);
+ kfree(core);
+ }
+ kfree(chip);
+}
+
+struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *pub, u16 coreid)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *core;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ list_for_each_entry(core, &chip->cores, list)
+ if (core->pub.id == coreid)
+ return &core->pub;
+
+ return NULL;
+}
+
+struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *pub)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core_priv *cc;
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ cc = list_first_entry(&chip->cores, struct brcmf_core_priv, list);
+ if (WARN_ON(!cc || cc->pub.id != BCMA_CORE_CHIPCOMMON))
+ return brcmf_chip_get_core(pub, BCMA_CORE_CHIPCOMMON);
+ return &cc->pub;
+}
+
+bool brcmf_chip_iscoreup(struct brcmf_core *pub)
+{
+ struct brcmf_core_priv *core;
+
+ core = container_of(pub, struct brcmf_core_priv, pub);
+ return core->chip->iscoreup(core);
+}
+
+void brcmf_chip_coredisable(struct brcmf_core *pub, u32 prereset, u32 reset)
+{
+ struct brcmf_core_priv *core;
+
+ core = container_of(pub, struct brcmf_core_priv, pub);
+ core->chip->coredisable(core, prereset, reset);
+}
+
+void brcmf_chip_resetcore(struct brcmf_core *pub, u32 prereset, u32 reset,
+ u32 postreset)
+{
+ struct brcmf_core_priv *core;
+
+ core = container_of(pub, struct brcmf_core_priv, pub);
+ core->chip->resetcore(core, prereset, reset, postreset);
+}
+
+static void
+brcmf_chip_cm3_enterdl(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_core *core;
+
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CM3);
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+ brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN);
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
+ brcmf_chip_resetcore(core, 0, 0, 0);
+}
+
+static bool brcmf_chip_cm3_exitdl(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_core *core;
+
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_INTERNAL_MEM);
+ if (!brcmf_chip_iscoreup(core)) {
+ brcmf_err("SOCRAM core is down after reset?\n");
+ return false;
+ }
+
+ chip->ops->exit_dl(chip->ctx, &chip->pub, 0);
+
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CM3);
+ brcmf_chip_resetcore(core, 0, 0, 0);
+
+ return true;
+}
+
+static inline void
+brcmf_chip_cr4_enterdl(struct brcmf_chip_priv *chip)
+{
+ struct brcmf_core *core;
+
+ brcmf_chip_disable_arm(chip, BCMA_CORE_ARM_CR4);
+
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_80211);
+ brcmf_chip_resetcore(core, D11_BCMA_IOCTL_PHYRESET |
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN,
+ D11_BCMA_IOCTL_PHYCLOCKEN);
+}
+
+static bool brcmf_chip_cr4_exitdl(struct brcmf_chip_priv *chip, u32 rstvec)
+{
+ struct brcmf_core *core;
+
+ chip->ops->exit_dl(chip->ctx, &chip->pub, rstvec);
+
+ /* restore ARM */
+ core = brcmf_chip_get_core(&chip->pub, BCMA_CORE_ARM_CR4);
+ brcmf_chip_resetcore(core, ARMCR4_BCMA_IOCTL_CPUHALT, 0, 0);
+
+ return true;
+}
+
+void brcmf_chip_enter_download(struct brcmf_chip *pub)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core *arm;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
+ if (arm) {
+ brcmf_chip_cr4_enterdl(chip);
+ return;
+ }
+
+ brcmf_chip_cm3_enterdl(chip);
+}
+
+bool brcmf_chip_exit_download(struct brcmf_chip *pub, u32 rstvec)
+{
+ struct brcmf_chip_priv *chip;
+ struct brcmf_core *arm;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+ arm = brcmf_chip_get_core(pub, BCMA_CORE_ARM_CR4);
+ if (arm)
+ return brcmf_chip_cr4_exitdl(chip, rstvec);
+
+ return brcmf_chip_cm3_exitdl(chip);
+}
+
+bool brcmf_chip_sr_capable(struct brcmf_chip *pub)
+{
+ u32 base, addr, reg, pmu_cc3_mask = ~0;
+ struct brcmf_chip_priv *chip;
+
+ brcmf_dbg(TRACE, "Enter\n");
+
+ /* old chips with PMU version less than 17 don't support save restore */
+ if (pub->pmurev < 17)
+ return false;
+
+ base = brcmf_chip_get_chipcommon(pub)->base;
+ chip = container_of(pub, struct brcmf_chip_priv, pub);
+
+ switch (pub->chip) {
+ case BCM43241_CHIP_ID:
+ case BCM4335_CHIP_ID:
+ case BCM4339_CHIP_ID:
+ /* read PMU chipcontrol register 3 */
+ addr = CORE_CC_REG(base, chipcontrol_addr);
+ chip->ops->write32(chip->ctx, addr, 3);
+ addr = CORE_CC_REG(base, chipcontrol_data);
+ reg = chip->ops->read32(chip->ctx, addr);
+ return (reg & pmu_cc3_mask) != 0;
+ default:
+ addr = CORE_CC_REG(base, pmucapabilities_ext);
+ reg = chip->ops->read32(chip->ctx, addr);
+ if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
+ return false;
+
+ addr = CORE_CC_REG(base, retention_ctl);
+ reg = chip->ops->read32(chip->ctx, addr);
+ return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
+ PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
+ }
+}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/chip.h b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
new file mode 100644
index 0000000..c32908d
--- /dev/null
+++ b/drivers/net/wireless/brcm80211/brcmfmac/chip.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
+ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
+ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+#ifndef BRCMF_CHIP_H
+#define BRCMF_CHIP_H
+
+#include <linux/types.h>
+
+#define CORE_CC_REG(base, field) \
+ (base + offsetof(struct chipcregs, field))
+
+/**
+ * struct brcmf_chip - chip level information.
+ *
+ * @chip: chip identifier.
+ * @chiprev: chip revision.
+ * @cc_caps: chipcommon core capabilities.
+ * @pmucaps: PMU capabilities.
+ * @pmurev: PMU revision.
+ * @rambase: RAM base address (only applicable for ARM CR4 chips).
+ * @ramsize: amount of RAM on chip.
+ * @name: string representation of the chip identifier.
+ */
+struct brcmf_chip {
+ u32 chip;
+ u32 chiprev;
+ u32 cc_caps;
+ u32 pmucaps;
+ u32 pmurev;
+ u32 rambase;
+ u32 ramsize;
+ char name[8];
+};
+
+/**
+ * struct brcmf_core - core related information.
+ *
+ * @id: core identifier.
+ * @rev: core revision.
+ * @base: base address of core register space.
+ */
+struct brcmf_core {
+ u16 id;
+ u16 rev;
+ u32 base;
+};
+
+/**
+ * struct brcmf_buscore_ops - buscore specific callbacks.
+ *
+ * @read32: read 32-bit value over bus.
+ * @write32: write 32-bit value over bus.
+ * @prepare: prepare bus for core configuration.
+ * @setup: bus-specific core setup.
+ * @exit_dl: exit download state.
+ * The callback should use the provided @rstvec when non-zero.
+ */
+struct brcmf_buscore_ops {
+ u32 (*read32)(void *ctx, u32 addr);
+ void (*write32)(void *ctx, u32 addr, u32 value);
+ int (*prepare)(void *ctx);
+ int (*setup)(void *ctx, struct brcmf_chip *chip);
+ void (*exit_dl)(void *ctx, struct brcmf_chip *chip, u32 rstvec);
+};
+
+struct brcmf_chip *brcmf_chip_attach(void *ctx,
+ const struct brcmf_buscore_ops *ops);
+void brcmf_chip_detach(struct brcmf_chip *chip);
+struct brcmf_core *brcmf_chip_get_core(struct brcmf_chip *chip, u16 coreid);
+struct brcmf_core *brcmf_chip_get_chipcommon(struct brcmf_chip *chip);
+bool brcmf_chip_iscoreup(struct brcmf_core *core);
+void brcmf_chip_coredisable(struct brcmf_core *core, u32 prereset, u32 reset);
+void brcmf_chip_resetcore(struct brcmf_core *core, u32 prereset, u32 reset,
+ u32 postreset);
+void brcmf_chip_enter_download(struct brcmf_chip *ci);
+bool brcmf_chip_exit_download(struct brcmf_chip *ci, u32 rstvec);
+bool brcmf_chip_sr_capable(struct brcmf_chip *pub);
+
+#endif /* BRCMF_CHIP_H */
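
For orientation, a minimal sketch of how a bus layer is expected to drive this API, with placeholder callbacks standing in for the real SDIO accessors (all my_* names are illustrative, not from the patch):

/* Sketch only: wiring a hypothetical bus context into brcmf_chip_attach(). */
static u32 my_read32(void *ctx, u32 addr) { return 0; /* bus read */ }
static void my_write32(void *ctx, u32 addr, u32 value) { /* bus write */ }
static int my_prepare(void *ctx) { return 0; /* pre-enumeration setup */ }
static void my_exit_dl(void *ctx, struct brcmf_chip *chip, u32 rstvec) { }

static const struct brcmf_buscore_ops my_buscore_ops = {
	.read32 = my_read32,
	.write32 = my_write32,
	.prepare = my_prepare,
	.exit_dl = my_exit_dl,
	/* .setup is optional; attach() insists only on the four above */
};

static int my_attach(void *bus_ctx)
{
	struct brcmf_chip *chip = brcmf_chip_attach(bus_ctx, &my_buscore_ops);

	if (IS_ERR(chip))
		return PTR_ERR(chip);
	/* chip->chip, chip->ramsize, brcmf_chip_get_core(), ... now usable */
	brcmf_chip_detach(chip);
	return 0;
}
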
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 119ee6e..49d4196 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/mmc/sdio.h>
+#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/card.h>
#include <linux/semaphore.h>
@@ -40,7 +41,7 @@
#include <brcm_hw_ids.h>
#include <soc.h>
#include "sdio_host.h"
-#include "sdio_chip.h"
+#include "chip.h"
#include "nvram.h"
#define DCMD_RESP_TIMEOUT 2000 /* In milli second */
@@ -156,6 +157,33 @@ struct rte_console {
/* manfid tuple length, include tuple, link bytes */
#define SBSDIO_CIS_MANFID_TUPLE_LEN 6
+#define CORE_BUS_REG(base, field) \
+ (base + offsetof(struct sdpcmd_regs, field))
+
+/* SDIO function 1 register CHIPCLKCSR */
+/* Force ALP request to backplane */
+#define SBSDIO_FORCE_ALP 0x01
+/* Force HT request to backplane */
+#define SBSDIO_FORCE_HT 0x02
+/* Force ILP request to backplane */
+#define SBSDIO_FORCE_ILP 0x04
+/* Make ALP ready (power up xtal) */
+#define SBSDIO_ALP_AVAIL_REQ 0x08
+/* Make HT ready (power up PLL) */
+#define SBSDIO_HT_AVAIL_REQ 0x10
+/* Squelch clock requests from HW */
+#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
+/* Status: ALP is ready */
+#define SBSDIO_ALP_AVAIL 0x40
+/* Status: HT is ready */
+#define SBSDIO_HT_AVAIL 0x80
+#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
+#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
+#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
+#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
+#define SBSDIO_CLKAV(regval, alponly) \
+ (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
+
/* intstatus */
#define I_SMB_SW0 (1 << 0) /* To SB Mail S/W interrupt 0 */
#define I_SMB_SW1 (1 << 1) /* To SB Mail S/W interrupt 1 */
@@ -493,6 +521,52 @@ enum brcmf_sdio_frmtype {
BRCMF_SDIO_FT_SUB,
};
+#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
+
+/* SDIO Pad drive strength to select value mappings */
+struct sdiod_drive_str {
+ u8 strength; /* Pad Drive Strength in mA */
+ u8 sel; /* Chip-specific select value */
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
+ {32, 0x6},
+ {26, 0x7},
+ {22, 0x4},
+ {16, 0x5},
+ {12, 0x2},
+ {8, 0x3},
+ {4, 0x0},
+ {0, 0x1}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
+static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
+ {6, 0x7},
+ {5, 0x6},
+ {4, 0x5},
+ {3, 0x4},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0}
+};
+
+/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
+static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
+ {3, 0x3},
+ {2, 0x2},
+ {1, 0x1},
+ {0, 0x0} };
+
+/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
+static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
+ {16, 0x7},
+ {12, 0x5},
+ {8, 0x3},
+ {4, 0x1}
+};
+
#define BCM43143_FIRMWARE_NAME "brcm/brcmfmac43143-sdio.bin"
#define BCM43143_NVRAM_NAME "brcm/brcmfmac43143-sdio.txt"
#define BCM43241B0_FIRMWARE_NAME "brcm/brcmfmac43241b0-sdio.bin"
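
SDIOD_DRVSTR_KEY() packs the chip id into the upper 16 bits and the PMU revision into the lower 16 so a single switch can pick the matching strength-to-select table; a short sketch of that selection (the chip/PMU pairings shown are assumptions for illustration only):

/* Sketch: choose a drive-strength table from (chip, pmurev). */
static const struct sdiod_drive_str *
pick_drvstr_tab(u32 chip, u32 pmurev, u32 *nent)
{
	switch (SDIOD_DRVSTR_KEY(chip, pmurev)) {
	case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):	/* assumed pairing */
		*nent = ARRAY_SIZE(sdiod_drvstr_tab1_1v8);
		return sdiod_drvstr_tab1_1v8;
	case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):	/* assumed pairing */
		*nent = ARRAY_SIZE(sdiod_drvstr_tab2_3v3);
		return sdiod_drvstr_tab2_3v3;
	default:
		*nent = 0;
		return NULL;
	}
}
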
@@ -618,27 +692,24 @@ static bool data_ok(struct brcmf_sdio *bus)
* Reads a register in the SDIO hardware block. This block occupies a series of
* adresses on the 32 bit backplane bus.
*/
-static int
-r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
+static int r_sdreg32(struct brcmf_sdio *bus, u32 *regvar, u32 offset)
{
- u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ struct brcmf_core *core;
int ret;
- *regvar = brcmf_sdiod_regrl(bus->sdiodev,
- bus->ci->c_inf[idx].base + offset, &ret);
+ core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ *regvar = brcmf_sdiod_regrl(bus->sdiodev, core->base + offset, &ret);
return ret;
}
-static int
-w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
+static int w_sdreg32(struct brcmf_sdio *bus, u32 regval, u32 reg_offset)
{
- u8 idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
+ struct brcmf_core *core;
int ret;
- brcmf_sdiod_regwl(bus->sdiodev,
- bus->ci->c_inf[idx].base + reg_offset,
- regval, &ret);
+ core = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ brcmf_sdiod_regwl(bus->sdiodev, core->base + reg_offset, regval, &ret);
return ret;
}
@@ -899,8 +970,8 @@ static int
brcmf_sdio_bus_sleep(struct brcmf_sdio *bus, bool sleep, bool pendok)
{
int err = 0;
- brcmf_dbg(TRACE, "Enter\n");
- brcmf_dbg(SDIO, "request %s currently %s\n",
+
+ brcmf_dbg(SDIO, "Enter: request %s currently %s\n",
(sleep ? "SLEEP" : "WAKE"),
(bus->sleeping ? "SLEEP" : "WAKE"));
@@ -952,6 +1023,86 @@ end:
}
+#ifdef DEBUG
+static inline bool brcmf_sdio_valid_shared_address(u32 addr)
+{
+ return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
+}
+
+static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
+ struct sdpcm_shared *sh)
+{
+ u32 addr;
+ int rv;
+ u32 shaddr = 0;
+ struct sdpcm_shared_le sh_le;
+ __le32 addr_le;
+
+ shaddr = bus->ci->rambase + bus->ramsize - 4;
+
+ /*
+ * Read last word in socram to determine
+ * address of sdpcm_shared structure
+ */
+ sdio_claim_host(bus->sdiodev->func[1]);
+ brcmf_sdio_bus_sleep(bus, false, false);
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
+ sdio_release_host(bus->sdiodev->func[1]);
+ if (rv < 0)
+ return rv;
+
+ addr = le32_to_cpu(addr_le);
+
+ brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
+
+ /*
+ * Check if addr is valid.
+ * NVRAM length at the end of memory should have been overwritten.
+ */
+ if (!brcmf_sdio_valid_shared_address(addr)) {
+ brcmf_err("invalid sdpcm_shared address 0x%08X\n",
+ addr);
+ return -EINVAL;
+ }
+
+ /* Read hndrte_shared structure */
+ rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
+ sizeof(struct sdpcm_shared_le));
+ if (rv < 0)
+ return rv;
+
+ /* Endianness */
+ sh->flags = le32_to_cpu(sh_le.flags);
+ sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
+ sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
+ sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
+ sh->assert_line = le32_to_cpu(sh_le.assert_line);
+ sh->console_addr = le32_to_cpu(sh_le.console_addr);
+ sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
+
+ if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
+ brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
+ SDPCM_SHARED_VERSION,
+ sh->flags & SDPCM_SHARED_VERSION_MASK);
+ return -EPROTO;
+ }
+
+ return 0;
+}
+
+static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
+{
+ struct sdpcm_shared sh;
+
+ if (brcmf_sdio_readshared(bus, &sh) == 0)
+ bus->console_addr = sh.console_addr;
+}
+#else
+static void brcmf_sdio_get_console_addr(struct brcmf_sdio *bus)
+{
+}
+#endif /* DEBUG */
+
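
brcmf_sdio_valid_shared_address() above rejects zero and any word whose upper half is the bitwise complement of its lower half, because that is the shape of the NVRAM-length token (assumed layout: length in the low 16 bits, ~length in the high 16 bits) left in the last RAM word until firmware replaces it with the real sdpcm_shared address; a worked illustration:

/* Sketch: why the complement check filters the NVRAM token.
 * token for len 0x0400 = 0xFBFF0400:
 *   (~0xFBFF0400 >> 16) & 0xffff == 0x0400 == (token & 0xffff)  -> rejected
 * a real address such as 0x0007fd00 breaks that equality         -> accepted
 */
static bool looks_like_nvram_token(u32 addr)
{
	return addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff);
}
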
static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
{
u32 intstatus = 0;
@@ -995,6 +1146,12 @@ static u32 brcmf_sdio_hostmail(struct brcmf_sdio *bus)
else
brcmf_dbg(SDIO, "Dongle ready, protocol version %d\n",
bus->sdpcm_ver);
+
+ /*
+ * Retrieve console state address now that firmware should have
+ * updated it.
+ */
+ brcmf_sdio_get_console_addr(bus);
}
/*
@@ -2292,14 +2449,13 @@ static inline void brcmf_sdio_clrintr(struct brcmf_sdio *bus)
static int brcmf_sdio_intr_rstatus(struct brcmf_sdio *bus)
{
- u8 idx;
+ struct brcmf_core *buscore;
u32 addr;
unsigned long val;
int n, ret;
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- addr = bus->ci->c_inf[idx].base +
- offsetof(struct sdpcmd_regs, intstatus);
+ buscore = brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV);
+ addr = buscore->base + offsetof(struct sdpcmd_regs, intstatus);
val = brcmf_sdiod_regrl(bus->sdiodev, addr, &ret);
bus->sdcnt.f1regdata++;
@@ -2809,72 +2965,6 @@ brcmf_sdio_bus_txctl(struct device *dev, unsigned char *msg, uint msglen)
}
#ifdef DEBUG
-static inline bool brcmf_sdio_valid_shared_address(u32 addr)
-{
- return !(addr == 0 || ((~addr >> 16) & 0xffff) == (addr & 0xffff));
-}
-
-static int brcmf_sdio_readshared(struct brcmf_sdio *bus,
- struct sdpcm_shared *sh)
-{
- u32 addr;
- int rv;
- u32 shaddr = 0;
- struct sdpcm_shared_le sh_le;
- __le32 addr_le;
-
- shaddr = bus->ci->rambase + bus->ramsize - 4;
-
- /*
- * Read last word in socram to determine
- * address of sdpcm_shared structure
- */
- sdio_claim_host(bus->sdiodev->func[1]);
- brcmf_sdio_bus_sleep(bus, false, false);
- rv = brcmf_sdiod_ramrw(bus->sdiodev, false, shaddr, (u8 *)&addr_le, 4);
- sdio_release_host(bus->sdiodev->func[1]);
- if (rv < 0)
- return rv;
-
- addr = le32_to_cpu(addr_le);
-
- brcmf_dbg(SDIO, "sdpcm_shared address 0x%08X\n", addr);
-
- /*
- * Check if addr is valid.
- * NVRAM length at the end of memory should have been overwritten.
- */
- if (!brcmf_sdio_valid_shared_address(addr)) {
- brcmf_err("invalid sdpcm_shared address 0x%08X\n",
- addr);
- return -EINVAL;
- }
-
- /* Read hndrte_shared structure */
- rv = brcmf_sdiod_ramrw(bus->sdiodev, false, addr, (u8 *)&sh_le,
- sizeof(struct sdpcm_shared_le));
- if (rv < 0)
- return rv;
-
- /* Endianness */
- sh->flags = le32_to_cpu(sh_le.flags);
- sh->trap_addr = le32_to_cpu(sh_le.trap_addr);
- sh->assert_exp_addr = le32_to_cpu(sh_le.assert_exp_addr);
- sh->assert_file_addr = le32_to_cpu(sh_le.assert_file_addr);
- sh->assert_line = le32_to_cpu(sh_le.assert_line);
- sh->console_addr = le32_to_cpu(sh_le.console_addr);
- sh->msgtrace_addr = le32_to_cpu(sh_le.msgtrace_addr);
-
- if ((sh->flags & SDPCM_SHARED_VERSION_MASK) > SDPCM_SHARED_VERSION) {
- brcmf_err("sdpcm shared version unsupported: dhd %d dongle %d\n",
- SDPCM_SHARED_VERSION,
- sh->flags & SDPCM_SHARED_VERSION_MASK);
- return -EPROTO;
- }
-
- return 0;
-}
-
static int brcmf_sdio_dump_console(struct brcmf_sdio *bus,
struct sdpcm_shared *sh, char __user *data,
size_t count)
@@ -3104,6 +3194,8 @@ static void brcmf_sdio_debugfs_create(struct brcmf_sdio *bus)
debugfs_create_file("forensics", S_IRUGO, dentry, bus,
&brcmf_sdio_forensic_ops);
brcmf_debugfs_create_sdio_count(drvr, &bus->sdcnt);
+ debugfs_create_u32("console_interval", 0644, dentry,
+ &bus->console_interval);
}
#else
static int brcmf_sdio_checkdied(struct brcmf_sdio *bus)
@@ -3222,32 +3314,17 @@ static int brcmf_sdio_download_code_file(struct brcmf_sdio *bus,
const struct firmware *fw)
{
int err;
- int offset;
- int address;
- int len;
brcmf_dbg(TRACE, "Enter\n");
- err = 0;
- offset = 0;
- address = bus->ci->rambase;
- while (offset < fw->size) {
- len = ((offset + MEMBLOCK) < fw->size) ? MEMBLOCK :
- fw->size - offset;
- err = brcmf_sdiod_ramrw(bus->sdiodev, true, address,
- (u8 *)&fw->data[offset], len);
- if (err) {
- brcmf_err("error %d on writing %d membytes at 0x%08x\n",
- err, len, address);
- return err;
- }
- offset += len;
- address += len;
- }
- if (!err)
- if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
- (u8 *)fw->data, fw->size))
- err = -EIO;
+ err = brcmf_sdiod_ramrw(bus->sdiodev, true, bus->ci->rambase,
+ (u8 *)fw->data, fw->size);
+ if (err)
+ brcmf_err("error %d on writing %d membytes at 0x%08x\n",
+ err, (int)fw->size, bus->ci->rambase);
+ else if (!brcmf_sdio_verifymemory(bus->sdiodev, bus->ci->rambase,
+ (u8 *)fw->data, fw->size))
+ err = -EIO;
return err;
}
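
With this hunk the MEMBLOCK chunking loop disappears from brcmf_sdio_download_code_file(); the single brcmf_sdiod_ramrw() call is presumably expected to split the transfer itself. For reference, the pattern the removed loop implemented looks roughly like this standalone sketch (write_block() is a made-up stand-in for the bus write, not a driver function):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define MEMBLOCK 2048           /* transfer granularity (illustrative) */

    static int write_block(uint32_t addr, const uint8_t *buf, size_t len)
    {
        printf("write %zu bytes @ 0x%08x\n", len, (unsigned)addr);
        return 0;                   /* pretend the bus transfer succeeded */
    }

    static int ram_write(uint32_t addr, const uint8_t *data, size_t size)
    {
        size_t off = 0;

        while (off < size) {
            size_t len = (size - off > MEMBLOCK) ? MEMBLOCK : size - off;
            int err = write_block(addr + (uint32_t)off, data + off, len);

            if (err)
                return err;
            off += len;
        }
        return 0;
    }

    int main(void)
    {
        uint8_t fw[5000];

        memset(fw, 0xa5, sizeof(fw));
        return ram_write(0x180000, fw, sizeof(fw)); /* 2048 + 2048 + 904 */
    }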
@@ -3290,7 +3367,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
brcmf_sdio_clkctl(bus, CLK_AVAIL, false);
/* Keep arm in reset */
- brcmf_sdio_chip_enter_download(bus->sdiodev, bus->ci);
+ brcmf_chip_enter_download(bus->ci);
fw = brcmf_sdio_get_fw(bus, BRCMF_FIRMWARE_BIN);
if (fw == NULL) {
@@ -3322,7 +3399,7 @@ static int brcmf_sdio_download_firmware(struct brcmf_sdio *bus)
}
/* Take arm out of reset */
- if (!brcmf_sdio_chip_exit_download(bus->sdiodev, bus->ci, rstvec)) {
+ if (!brcmf_chip_exit_download(bus->ci, rstvec)) {
brcmf_err("error getting out of ARM core reset\n");
goto err;
}
@@ -3337,40 +3414,6 @@ err:
return bcmerror;
}
-static bool brcmf_sdio_sr_capable(struct brcmf_sdio *bus)
-{
- u32 addr, reg, pmu_cc3_mask = ~0;
- int err;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- /* old chips with PMU version less than 17 don't support save restore */
- if (bus->ci->pmurev < 17)
- return false;
-
- switch (bus->ci->chip) {
- case BCM43241_CHIP_ID:
- case BCM4335_CHIP_ID:
- case BCM4339_CHIP_ID:
- /* read PMU chipcontrol register 3 */
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_addr);
- brcmf_sdiod_regwl(bus->sdiodev, addr, 3, NULL);
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, chipcontrol_data);
- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
- return (reg & pmu_cc3_mask) != 0;
- default:
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, pmucapabilities_ext);
- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, &err);
- if ((reg & PCAPEXT_SR_SUPPORTED_MASK) == 0)
- return false;
-
- addr = CORE_CC_REG(bus->ci->c_inf[0].base, retention_ctl);
- reg = brcmf_sdiod_regrl(bus->sdiodev, addr, NULL);
- return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK |
- PMU_RCTL_LOGIC_DISABLE_MASK)) == 0;
- }
-}
-
static void brcmf_sdio_sr_init(struct brcmf_sdio *bus)
{
int err = 0;
@@ -3422,7 +3465,7 @@ static int brcmf_sdio_kso_init(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
/* KSO bit added in SDIO core rev 12 */
- if (bus->ci->c_inf[1].rev < 12)
+ if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12)
return 0;
val = brcmf_sdiod_regrb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, &err);
@@ -3453,15 +3496,13 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
struct brcmf_sdio *bus = sdiodev->bus;
uint pad_size;
u32 value;
- u8 idx;
int err;
/* the commands below use the terms tx and rx from
* a device perspective, ie. bus:txglom affects the
* bus transfers from device to host.
*/
- idx = brcmf_sdio_chip_getinfidx(bus->ci, BCMA_CORE_SDIO_DEV);
- if (bus->ci->c_inf[idx].rev < 12) {
+ if (brcmf_chip_get_core(bus->ci, BCMA_CORE_SDIO_DEV)->rev < 12) {
/* for sdio core rev < 12, disable txgloming */
value = 0;
err = brcmf_iovar_data_set(dev, "bus:txglom", &value,
@@ -3568,7 +3609,7 @@ static int brcmf_sdio_bus_init(struct device *dev)
ret = -ENODEV;
}
- if (brcmf_sdio_sr_capable(bus)) {
+ if (brcmf_chip_sr_capable(bus->ci)) {
brcmf_sdio_sr_init(bus);
} else {
/* Restore previous clock setting */
@@ -3717,6 +3758,170 @@ static void brcmf_sdio_dataworker(struct work_struct *work)
}
}
+static void
+brcmf_sdio_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
+ struct brcmf_chip *ci, u32 drivestrength)
+{
+ const struct sdiod_drive_str *str_tab = NULL;
+ u32 str_mask;
+ u32 str_shift;
+ u32 base;
+ u32 i;
+ u32 drivestrength_sel = 0;
+ u32 cc_data_temp;
+ u32 addr;
+
+ if (!(ci->cc_caps & CC_CAP_PMU))
+ return;
+
+ switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
+ case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
+ str_tab = sdiod_drvstr_tab1_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
+ str_tab = sdiod_drvstr_tab6_1v8;
+ str_mask = 0x00001800;
+ str_shift = 11;
+ break;
+ case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
+ /* note: 43143 does not support tristate */
+ i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
+ if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
+ str_tab = sdiod_drvstr_tab2_3v3;
+ str_mask = 0x00000007;
+ str_shift = 0;
+ } else
+ brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
+ ci->name, drivestrength);
+ break;
+ case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
+ str_tab = sdiod_drive_strength_tab5_1v8;
+ str_mask = 0x00003800;
+ str_shift = 11;
+ break;
+ default:
+ brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
+ ci->name, ci->chiprev, ci->pmurev);
+ break;
+ }
+
+ if (str_tab != NULL) {
+ for (i = 0; str_tab[i].strength != 0; i++) {
+ if (drivestrength >= str_tab[i].strength) {
+ drivestrength_sel = str_tab[i].sel;
+ break;
+ }
+ }
+ base = brcmf_chip_get_chipcommon(ci)->base;
+ addr = CORE_CC_REG(base, chipcontrol_addr);
+ brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
+ cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+ cc_data_temp &= ~str_mask;
+ drivestrength_sel <<= str_shift;
+ cc_data_temp |= drivestrength_sel;
+ brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
+
+ brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
+ str_tab[i].strength, drivestrength, cc_data_temp);
+ }
+}
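
The drive-strength tables are ordered by decreasing milliamp value, so the loop above settles on the strongest setting that does not exceed the request, then splices the select bits into chipcontrol under str_mask/str_shift. A self-contained sketch of that selection, with register access replaced by a plain variable:

    #include <stdint.h>
    #include <stdio.h>

    struct drive_str { uint8_t strength_ma; uint8_t sel; };

    /* same ordering idea as the 1.8V tables in this file */
    static const struct drive_str tab_1v8[] = {
        {32, 0x6}, {26, 0x7}, {22, 0x4}, {16, 0x5},
        {12, 0x2}, {8, 0x3}, {4, 0x0}, {0, 0x1},
    };

    int main(void)
    {
        uint32_t requested = 14;            /* mA requested by platform data */
        uint32_t mask = 0x00003800, shift = 11;
        uint32_t chipcontrol = 0x12345fff;  /* pretend current register value */
        uint32_t sel = 0;
        unsigned int i;

        for (i = 0; tab_1v8[i].strength_ma != 0; i++) {
            if (requested >= tab_1v8[i].strength_ma) {
                sel = tab_1v8[i].sel;
                break;
            }
        }
        chipcontrol = (chipcontrol & ~mask) | (sel << shift);
        printf("%u mA requested -> %u mA (sel 0x%x), chipcontrol 0x%08x\n",
               (unsigned)requested, (unsigned)tab_1v8[i].strength_ma,
               (unsigned)sel, (unsigned)chipcontrol);
        return 0;
    }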
+
+static int brcmf_sdio_buscoreprep(void *ctx)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+ int err = 0;
+ u8 clkval, clkset;
+
+ /* Try forcing SDIO core to do ALPAvail request only */
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ if (err) {
+ brcmf_err("error writing for HT off\n");
+ return err;
+ }
+
+ /* If register supported, wait for ALPAvail and then force ALP */
+ /* This may take up to 15 milliseconds */
+ clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL);
+
+ if ((clkval & ~SBSDIO_AVBITS) != clkset) {
+ brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
+ clkset, clkval);
+ return -EACCES;
+ }
+
+ SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
+ SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
+ !SBSDIO_ALPAV(clkval)),
+ PMU_MAX_TRANSITION_DLY);
+ if (!SBSDIO_ALPAV(clkval)) {
+ brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
+ clkval);
+ return -EBUSY;
+ }
+
+ clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
+ udelay(65);
+
+ /* Also, disable the extra SDIO pull-ups */
+ brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
+
+ return 0;
+}
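
The SPINWAIT around the ALP-available bit is a bounded poll: re-read CHIPCLKCSR until the bit appears or the time budget (up to ~15 ms) is spent, then fall back to an error. A rough user-space analogue of that pattern, with an invented status source standing in for the FUNC1 register read:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define ALP_AVAIL    0x40
    #define MAX_WAIT_US  15000
    #define POLL_STEP_US 10

    static int fake_time_us;

    static uint8_t read_clkcsr(void)
    {
        /* pretend ALP becomes available after ~2 ms */
        return (fake_time_us > 2000) ? ALP_AVAIL : 0;
    }

    static bool wait_alp_avail(void)
    {
        int waited = 0;

        while (!(read_clkcsr() & ALP_AVAIL)) {
            if (waited >= MAX_WAIT_US)
                return false;       /* caller would return -EBUSY */
            waited += POLL_STEP_US;
            fake_time_us += POLL_STEP_US;
        }
        return true;
    }

    int main(void)
    {
        printf("ALP available: %s\n", wait_alp_avail() ? "yes" : "timeout");
        return 0;
    }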
+
+static void brcmf_sdio_buscore_exitdl(void *ctx, struct brcmf_chip *chip,
+ u32 rstvec)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+ struct brcmf_core *core;
+ u32 reg_addr;
+
+ /* clear all interrupts */
+ core = brcmf_chip_get_core(chip, BCMA_CORE_SDIO_DEV);
+ reg_addr = core->base + offsetof(struct sdpcmd_regs, intstatus);
+ brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
+
+ if (rstvec)
+ /* Write reset vector to address 0 */
+ brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
+ sizeof(rstvec));
+}
+
+static u32 brcmf_sdio_buscore_read32(void *ctx, u32 addr)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+ u32 val, rev;
+
+ val = brcmf_sdiod_regrl(sdiodev, addr, NULL);
+ if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
+ addr == CORE_CC_REG(SI_ENUM_BASE, chipid)) {
+ rev = (val & CID_REV_MASK) >> CID_REV_SHIFT;
+ if (rev >= 2) {
+ val &= ~CID_ID_MASK;
+ val |= BCM4339_CHIP_ID;
+ }
+ }
+ return val;
+}
+
+static void brcmf_sdio_buscore_write32(void *ctx, u32 addr, u32 val)
+{
+ struct brcmf_sdio_dev *sdiodev = ctx;
+
+ brcmf_sdiod_regwl(sdiodev, addr, val, NULL);
+}
+
+static const struct brcmf_buscore_ops brcmf_sdio_buscore_ops = {
+ .prepare = brcmf_sdio_buscoreprep,
+ .exit_dl = brcmf_sdio_buscore_exitdl,
+ .read32 = brcmf_sdio_buscore_read32,
+ .write32 = brcmf_sdio_buscore_write32,
+};
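
brcmf_sdio_buscore_ops is the SDIO side of the new bus-agnostic chip layer: the chip code keeps an opaque context plus a const table of callbacks and never touches the bus directly. A minimal sketch of that ops-table indirection, with invented names and a fake register backend:

    #include <stdint.h>
    #include <stdio.h>

    struct buscore_ops {
        uint32_t (*read32)(void *ctx, uint32_t addr);
        void (*write32)(void *ctx, uint32_t addr, uint32_t val);
    };

    struct chip {
        void *ctx;                      /* bus-private data */
        const struct buscore_ops *ops;  /* bus-provided accessors */
    };

    /* a trivial "bus" backend keeping registers in an array */
    static uint32_t fake_regs[4];

    static uint32_t fake_read32(void *ctx, uint32_t addr)
    {
        (void)ctx;
        return fake_regs[addr / 4];
    }

    static void fake_write32(void *ctx, uint32_t addr, uint32_t val)
    {
        (void)ctx;
        fake_regs[addr / 4] = val;
    }

    static const struct buscore_ops fake_ops = {
        .read32 = fake_read32,
        .write32 = fake_write32,
    };

    int main(void)
    {
        struct chip ci = { .ctx = NULL, .ops = &fake_ops };

        ci.ops->write32(ci.ctx, 0x8, 0xdeadbeef);
        printf("reg[2] = 0x%08x\n", (unsigned)ci.ops->read32(ci.ctx, 0x8));
        return 0;
    }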
+
static bool
brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
{
@@ -3732,7 +3937,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
brcmf_sdiod_regrl(bus->sdiodev, SI_ENUM_BASE, NULL));
/*
- * Force PLL off until brcmf_sdio_chip_attach()
+ * Force PLL off until brcmf_chip_attach()
* programs PLL control regs
*/
@@ -3753,8 +3958,10 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
*/
brcmf_bus_change_state(bus->sdiodev->bus_if, BRCMF_BUS_DOWN);
- if (brcmf_sdio_chip_attach(bus->sdiodev, &bus->ci)) {
- brcmf_err("brcmf_sdio_chip_attach failed!\n");
+ bus->ci = brcmf_chip_attach(bus->sdiodev, &brcmf_sdio_buscore_ops);
+ if (IS_ERR(bus->ci)) {
+ brcmf_err("brcmf_chip_attach failed!\n");
+ bus->ci = NULL;
goto fail;
}
@@ -3767,7 +3974,7 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
drivestrength = bus->sdiodev->pdata->drive_strength;
else
drivestrength = DEFAULT_SDIO_DRIVE_STRENGTH;
- brcmf_sdio_chip_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
+ brcmf_sdio_drivestrengthinit(bus->sdiodev, bus->ci, drivestrength);
/* Get info on the SOCRAM cores... */
bus->ramsize = bus->ci->ramsize;
@@ -3790,24 +3997,18 @@ brcmf_sdio_probe_attach(struct brcmf_sdio *bus)
goto fail;
/* set PMUControl so a backplane reset does PMU state reload */
- reg_addr = CORE_CC_REG(bus->ci->c_inf[0].base,
+ reg_addr = CORE_CC_REG(brcmf_chip_get_chipcommon(bus->ci)->base,
pmucontrol);
- reg_val = brcmf_sdiod_regrl(bus->sdiodev,
- reg_addr,
- &err);
+ reg_val = brcmf_sdiod_regrl(bus->sdiodev, reg_addr, &err);
if (err)
goto fail;
reg_val |= (BCMA_CC_PMU_CTL_RES_RELOAD << BCMA_CC_PMU_CTL_RES_SHIFT);
- brcmf_sdiod_regwl(bus->sdiodev,
- reg_addr,
- reg_val,
- &err);
+ brcmf_sdiod_regwl(bus->sdiodev, reg_addr, reg_val, &err);
if (err)
goto fail;
-
sdio_release_host(bus->sdiodev->func[1]);
brcmu_pktq_init(&bus->txq, (PRIOMASK + 1), TXQLEN);
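
brcmf_chip_attach() now reports failure through the returned pointer itself, which is why probe checks IS_ERR() and resets bus->ci to NULL. For illustration only, a simplified stand-in for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention:

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    /* errors ride in the top page of the address space */
    static void *err_ptr(long err) { return (void *)(intptr_t)err; }
    static int is_err(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }
    static long ptr_err(const void *p) { return (long)(intptr_t)p; }

    static void *chip_attach(int fail)
    {
        if (fail)
            return err_ptr(-ENODEV);
        return malloc(64);          /* stands in for the chip object */
    }

    int main(void)
    {
        void *ci = chip_attach(1);

        if (is_err(ci)) {
            printf("attach failed: %ld\n", ptr_err(ci));
            ci = NULL;              /* mirror the diff's bus->ci = NULL */
        }
        free(ci);
        return 0;
    }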
@@ -4022,14 +4223,14 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
/* De-register interrupt handler */
brcmf_sdiod_intr_unregister(bus->sdiodev);
- cancel_work_sync(&bus->datawork);
- if (bus->brcmf_wq)
- destroy_workqueue(bus->brcmf_wq);
-
if (bus->sdiodev->bus_if->drvr) {
brcmf_detach(bus->sdiodev->dev);
}
+ cancel_work_sync(&bus->datawork);
+ if (bus->brcmf_wq)
+ destroy_workqueue(bus->brcmf_wq);
+
if (bus->ci) {
if (bus->sdiodev->bus_if->state == BRCMF_BUS_DOWN) {
sdio_claim_host(bus->sdiodev->func[1]);
@@ -4040,12 +4241,11 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
* all necessary cores.
*/
msleep(20);
- brcmf_sdio_chip_enter_download(bus->sdiodev,
- bus->ci);
+ brcmf_chip_enter_download(bus->ci);
brcmf_sdio_clkctl(bus, CLK_NONE, false);
sdio_release_host(bus->sdiodev->func[1]);
}
- brcmf_sdio_chip_detach(&bus->ci);
+ brcmf_chip_detach(bus->ci);
}
kfree(bus->rxbuf);

diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
index 22adbe3..59a5af5 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.c
@@ -124,7 +124,8 @@ brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data)
}
static u32
-brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
+brcmf_create_iovar(char *name, const char *data, u32 datalen,
+ char *buf, u32 buflen)
{
u32 len;
@@ -144,7 +145,7 @@ brcmf_create_iovar(char *name, char *data, u32 datalen, char *buf, u32 buflen)
s32
-brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
u32 len)
{
struct brcmf_pub *drvr = ifp->drvr;
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
index 77eae86..a30be68 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/fwil.h
@@ -83,7 +83,7 @@ s32 brcmf_fil_cmd_data_get(struct brcmf_if *ifp, u32 cmd, void *data, u32 len);
s32 brcmf_fil_cmd_int_set(struct brcmf_if *ifp, u32 cmd, u32 data);
s32 brcmf_fil_cmd_int_get(struct brcmf_if *ifp, u32 cmd, u32 *data);
-s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, void *data,
+s32 brcmf_fil_iovar_data_set(struct brcmf_if *ifp, char *name, const void *data,
u32 len);
s32 brcmf_fil_iovar_data_get(struct brcmf_if *ifp, char *name, void *data,
u32 len);
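
The const qualifiers added to brcmf_create_iovar() and brcmf_fil_iovar_data_set() are sound because the name and payload are only copied into the output buffer, never modified. A sketch of that packing, assuming the usual "name\0data" layout (buffer size and iovar name are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t create_iovar(const char *name, const void *data,
                                 uint32_t datalen, char *buf, uint32_t buflen)
    {
        uint32_t namelen = (uint32_t)strlen(name) + 1;  /* include NUL */

        if (namelen + datalen > buflen)
            return 0;                       /* caller treats 0 as failure */

        memcpy(buf, name, namelen);         /* "bus:txglom\0" */
        if (data && datalen)
            memcpy(buf + namelen, data, datalen);   /* payload follows name */
        return namelen + datalen;
    }

    int main(void)
    {
        char buf[64];
        uint32_t value = 0;                 /* e.g. disable tx glomming */
        uint32_t len = create_iovar("bus:txglom", &value, sizeof(value),
                                    buf, sizeof(buf));

        printf("packed %u bytes, name=\"%s\"\n", (unsigned)len, buf);
        return 0;
    }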
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
deleted file mode 100644
index 82bf3c5..0000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.c
+++ /dev/null
@@ -1,972 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-/* ***** SDIO interface chip backplane handle functions ***** */
-
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/mmc/card.h>
-#include <linux/mmc/sdio_func.h>
-#include <linux/mmc/sdio_ids.h>
-#include <linux/ssb/ssb_regs.h>
-#include <linux/bcma/bcma.h>
-
-#include <chipcommon.h>
-#include <brcm_hw_ids.h>
-#include <brcmu_wifi.h>
-#include <brcmu_utils.h>
-#include <soc.h>
-#include "dhd_dbg.h"
-#include "sdio_host.h"
-#include "sdio_chip.h"
-
-/* chip core base & ramsize */
-/* bcm4329 */
-/* SDIO device core, ID 0x829 */
-#define BCM4329_CORE_BUS_BASE 0x18011000
-/* internal memory core, ID 0x80e */
-#define BCM4329_CORE_SOCRAM_BASE 0x18003000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM4329_CORE_ARM_BASE 0x18002000
-#define BCM4329_RAMSIZE 0x48000
-
-/* bcm43143 */
-/* SDIO device core */
-#define BCM43143_CORE_BUS_BASE 0x18002000
-/* internal memory core */
-#define BCM43143_CORE_SOCRAM_BASE 0x18004000
-/* ARM Cortex M3 core, ID 0x82a */
-#define BCM43143_CORE_ARM_BASE 0x18003000
-#define BCM43143_RAMSIZE 0x70000
-
-/* All D11 cores, ID 0x812 */
-#define BCM43xx_CORE_D11_BASE 0x18001000
-
-#define SBCOREREV(sbidh) \
- ((((sbidh) & SSB_IDHIGH_RCHI) >> SSB_IDHIGH_RCHI_SHIFT) | \
- ((sbidh) & SSB_IDHIGH_RCLO))
-
-/* SOC Interconnect types (aka chip types) */
-#define SOCI_SB 0
-#define SOCI_AI 1
-
-/* EROM CompIdentB */
-#define CIB_REV_MASK 0xff000000
-#define CIB_REV_SHIFT 24
-
-/* ARM CR4 core specific control flag bits */
-#define ARMCR4_BCMA_IOCTL_CPUHALT 0x0020
-
-/* D11 core specific control flag bits */
-#define D11_BCMA_IOCTL_PHYCLOCKEN 0x0004
-#define D11_BCMA_IOCTL_PHYRESET 0x0008
-
-#define SDIOD_DRVSTR_KEY(chip, pmu) (((chip) << 16) | (pmu))
-/* SDIO Pad drive strength to select value mappings */
-struct sdiod_drive_str {
- u8 strength; /* Pad Drive Strength in mA */
- u8 sel; /* Chip-specific select value */
-};
-/* SDIO Drive Strength to sel value table for PMU Rev 11 (1.8V) */
-static const struct sdiod_drive_str sdiod_drvstr_tab1_1v8[] = {
- {32, 0x6},
- {26, 0x7},
- {22, 0x4},
- {16, 0x5},
- {12, 0x2},
- {8, 0x3},
- {4, 0x0},
- {0, 0x1}
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 13 (1.8v) */
-static const struct sdiod_drive_str sdiod_drive_strength_tab5_1v8[] = {
- {6, 0x7},
- {5, 0x6},
- {4, 0x5},
- {3, 0x4},
- {2, 0x2},
- {1, 0x1},
- {0, 0x0}
-};
-
-/* SDIO Drive Strength to sel value table for PMU Rev 17 (1.8v) */
-static const struct sdiod_drive_str sdiod_drvstr_tab6_1v8[] = {
- {3, 0x3},
- {2, 0x2},
- {1, 0x1},
- {0, 0x0} };
-
-/* SDIO Drive Strength to sel value table for 43143 PMU Rev 17 (3.3V) */
-static const struct sdiod_drive_str sdiod_drvstr_tab2_3v3[] = {
- {16, 0x7},
- {12, 0x5},
- {8, 0x3},
- {4, 0x1}
-};
-
-u8
-brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid)
-{
- u8 idx;
-
- for (idx = 0; idx < BRCMF_MAX_CORENUM; idx++)
- if (coreid == ci->c_inf[idx].id)
- return idx;
-
- return BRCMF_MAX_CORENUM;
-}
-
-static u32
-brcmf_sdio_sb_corerev(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u32 regdata;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbidhigh),
- NULL);
- return SBCOREREV(regdata);
-}
-
-static u32
-brcmf_sdio_ai_corerev(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
-
- return (ci->c_inf[idx].cib & CIB_REV_MASK) >> CIB_REV_SHIFT;
-}
-
-static bool
-brcmf_sdio_sb_iscoreup(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u32 regdata;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return false;
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- regdata &= (SSB_TMSLOW_RESET | SSB_TMSLOW_REJECT |
- SSB_IMSTATE_REJECT | SSB_TMSLOW_CLOCK);
- return (SSB_TMSLOW_CLOCK == regdata);
-}
-
-static bool
-brcmf_sdio_ai_iscoreup(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid)
-{
- u32 regdata;
- u8 idx;
- bool ret;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return false;
-
- regdata = brcmf_sdiod_regrl(sdiodev, ci->c_inf[idx].wrapbase+BCMA_IOCTL,
- NULL);
- ret = (regdata & (BCMA_IOCTL_FGC | BCMA_IOCTL_CLK)) == BCMA_IOCTL_CLK;
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- ci->c_inf[idx].wrapbase+BCMA_RESET_CTL,
- NULL);
- ret = ret && ((regdata & BCMA_RESET_CTL_RESET) == 0);
-
- return ret;
-}
-
-static void
-brcmf_sdio_sb_coredisable(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits)
-{
- u32 regdata, base;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- base = ci->c_inf[idx].base;
-
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
- if (regdata & SSB_TMSLOW_RESET)
- return;
-
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbtmstatelow), NULL);
- if ((regdata & SSB_TMSLOW_CLOCK) != 0) {
- /*
- * set target reject and spin until busy is clear
- * (preserve core-specific bits)
- */
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatelow), NULL);
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- regdata | SSB_TMSLOW_REJECT, NULL);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatelow), NULL);
- udelay(1);
- SPINWAIT((brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatehigh),
- NULL) &
- SSB_TMSHIGH_BUSY), 100000);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatehigh),
- NULL);
- if (regdata & SSB_TMSHIGH_BUSY)
- brcmf_err("core state still busy\n");
-
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
- NULL);
- if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
- regdata |= SSB_IMSTATE_REJECT;
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
- regdata, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
- udelay(1);
- SPINWAIT((brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL) &
- SSB_IMSTATE_BUSY), 100000);
- }
-
- /* set reset and reject while enabling the clocks */
- regdata = SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK |
- SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET;
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- regdata, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbtmstatelow), NULL);
- udelay(10);
-
- /* clear the initiator reject bit */
- regdata = brcmf_sdiod_regrl(sdiodev, CORE_SB(base, sbidlow),
- NULL);
- if (regdata & SSB_IDLOW_INITIATOR) {
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(base, sbimstate),
- NULL);
- regdata &= ~SSB_IMSTATE_REJECT;
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbimstate),
- regdata, NULL);
- }
- }
-
- /* leave reset and reject asserted */
- brcmf_sdiod_regwl(sdiodev, CORE_SB(base, sbtmstatelow),
- (SSB_TMSLOW_REJECT | SSB_TMSLOW_RESET), NULL);
- udelay(1);
-}
-
-static void
-brcmf_sdio_ai_coredisable(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits)
-{
- u8 idx;
- u32 regdata;
- u32 wrapbase;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- wrapbase = ci->c_inf[idx].wrapbase;
-
- /* if core is already in reset, just return */
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL);
- if ((regdata & BCMA_RESET_CTL_RESET) != 0)
- return;
-
- /* configure reset */
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
- BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-
- /* put in reset */
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL,
- BCMA_RESET_CTL_RESET, NULL);
- usleep_range(10, 20);
-
- /* wait till reset is 1 */
- SPINWAIT(brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) !=
- BCMA_RESET_CTL_RESET, 300);
-
- /* post reset configure */
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, pre_resetbits |
- BCMA_IOCTL_FGC | BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-}
-
-static void
-brcmf_sdio_sb_resetcore(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits, u32 post_resetbits)
-{
- u32 regdata;
- u8 idx;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- /*
- * Must do the disable sequence first to work for
- * arbitrary current core state.
- */
- brcmf_sdio_sb_coredisable(sdiodev, ci, coreid, pre_resetbits,
- in_resetbits);
-
- /*
- * Now do the initialization sequence.
- * set reset while enabling the clock and
- * forcing them on throughout the core
- */
- brcmf_sdiod_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK | SSB_TMSLOW_RESET,
- NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- udelay(1);
-
- /* clear any serror */
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
- NULL);
- if (regdata & SSB_TMSHIGH_SERR)
- brcmf_sdiod_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatehigh),
- 0, NULL);
-
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate),
- NULL);
- if (regdata & (SSB_IMSTATE_IBE | SSB_IMSTATE_TO))
- brcmf_sdiod_regwl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbimstate),
- regdata & ~(SSB_IMSTATE_IBE | SSB_IMSTATE_TO),
- NULL);
-
- /* clear reset and allow it to propagate throughout the core */
- brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_FGC | SSB_TMSLOW_CLOCK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- udelay(1);
-
- /* leave clock enabled */
- brcmf_sdiod_regwl(sdiodev, CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- SSB_TMSLOW_CLOCK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_SB(ci->c_inf[idx].base, sbtmstatelow),
- NULL);
- udelay(1);
-}
-
-static void
-brcmf_sdio_ai_resetcore(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits, u32 post_resetbits)
-{
- u8 idx;
- u32 regdata;
- u32 wrapbase;
-
- idx = brcmf_sdio_chip_getinfidx(ci, coreid);
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- wrapbase = ci->c_inf[idx].wrapbase;
-
- /* must disable first to work for arbitrary current core state */
- brcmf_sdio_ai_coredisable(sdiodev, ci, coreid, pre_resetbits,
- in_resetbits);
-
- while (brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_RESET_CTL, NULL) &
- BCMA_RESET_CTL_RESET) {
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_RESET_CTL, 0, NULL);
- usleep_range(40, 60);
- }
-
- brcmf_sdiod_regwl(sdiodev, wrapbase + BCMA_IOCTL, post_resetbits |
- BCMA_IOCTL_CLK, NULL);
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
-}
-
-#ifdef DEBUG
-/* safety check for chipinfo */
-static int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
-{
- u8 core_idx;
-
- /* check RAM core presence for ARM CM3 core */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
- if (BRCMF_MAX_CORENUM != core_idx) {
- core_idx = brcmf_sdio_chip_getinfidx(ci,
- BCMA_CORE_INTERNAL_MEM);
- if (BRCMF_MAX_CORENUM == core_idx) {
- brcmf_err("RAM core not provided with ARM CM3 core\n");
- return -ENODEV;
- }
- }
-
- /* check RAM base for ARM CR4 core */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
- if (BRCMF_MAX_CORENUM != core_idx) {
- if (ci->rambase == 0) {
- brcmf_err("RAM base not provided with ARM CR4 core\n");
- return -ENOMEM;
- }
- }
-
- return 0;
-}
-#else /* DEBUG */
-static inline int brcmf_sdio_chip_cichk(struct brcmf_chip *ci)
-{
- return 0;
-}
-#endif
-
-static int brcmf_sdio_chip_recognition(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u32 regdata;
- u32 socitype;
-
- /* Get CC core rev
- * Chipid is assume to be at offset 0 from SI_ENUM_BASE
- * For different chiptypes or old sdio hosts w/o chipcommon,
- * other ways of recognition should be added here.
- */
- regdata = brcmf_sdiod_regrl(sdiodev,
- CORE_CC_REG(SI_ENUM_BASE, chipid),
- NULL);
- ci->chip = regdata & CID_ID_MASK;
- ci->chiprev = (regdata & CID_REV_MASK) >> CID_REV_SHIFT;
- if (sdiodev->func[0]->device == SDIO_DEVICE_ID_BROADCOM_4335_4339 &&
- ci->chiprev >= 2)
- ci->chip = BCM4339_CHIP_ID;
- socitype = (regdata & CID_TYPE_MASK) >> CID_TYPE_SHIFT;
-
- brcmf_dbg(INFO, "found %s chip: id=0x%x, rev=%d\n",
- socitype == SOCI_SB ? "SB" : "AXI", ci->chip, ci->chiprev);
-
- if (socitype == SOCI_SB) {
- if (ci->chip != BCM4329_CHIP_ID) {
- brcmf_err("SB chip is not supported\n");
- return -ENODEV;
- }
- ci->iscoreup = brcmf_sdio_sb_iscoreup;
- ci->corerev = brcmf_sdio_sb_corerev;
- ci->coredisable = brcmf_sdio_sb_coredisable;
- ci->resetcore = brcmf_sdio_sb_resetcore;
-
- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
- ci->c_inf[0].base = SI_ENUM_BASE;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = BCM4329_CORE_BUS_BASE;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = BCM4329_CORE_SOCRAM_BASE;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = BCM4329_CORE_ARM_BASE;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->ramsize = BCM4329_RAMSIZE;
- } else if (socitype == SOCI_AI) {
- ci->iscoreup = brcmf_sdio_ai_iscoreup;
- ci->corerev = brcmf_sdio_ai_corerev;
- ci->coredisable = brcmf_sdio_ai_coredisable;
- ci->resetcore = brcmf_sdio_ai_resetcore;
-
- ci->c_inf[0].id = BCMA_CORE_CHIPCOMMON;
- ci->c_inf[0].base = SI_ENUM_BASE;
-
- /* Address of cores for new chips should be added here */
- switch (ci->chip) {
- case BCM43143_CHIP_ID:
- ci->c_inf[0].wrapbase = ci->c_inf[0].base + 0x00100000;
- ci->c_inf[0].cib = 0x2b000000;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = BCM43143_CORE_BUS_BASE;
- ci->c_inf[1].wrapbase = ci->c_inf[1].base + 0x00100000;
- ci->c_inf[1].cib = 0x18000000;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = BCM43143_CORE_SOCRAM_BASE;
- ci->c_inf[2].wrapbase = ci->c_inf[2].base + 0x00100000;
- ci->c_inf[2].cib = 0x14000000;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = BCM43143_CORE_ARM_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->c_inf[3].cib = 0x07000000;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = BCM43143_RAMSIZE;
- break;
- case BCM43241_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2a084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0e004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x14080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x07004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x90000;
- break;
- case BCM4330_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x27004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x07004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x0d080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x03004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x48000;
- break;
- case BCM4334_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x29004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0d004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x13080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x07004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x80000;
- break;
- case BCM4335_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2b084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18005000;
- ci->c_inf[1].wrapbase = 0x18105000;
- ci->c_inf[1].cib = 0x0f004211;
- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
- ci->c_inf[2].base = 0x18002000;
- ci->c_inf[2].wrapbase = 0x18102000;
- ci->c_inf[2].cib = 0x01084411;
- ci->c_inf[3].id = BCMA_CORE_80211;
- ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->ramsize = 0xc0000;
- ci->rambase = 0x180000;
- break;
- case BCM43362_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x27004211;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18002000;
- ci->c_inf[1].wrapbase = 0x18102000;
- ci->c_inf[1].cib = 0x0a004211;
- ci->c_inf[2].id = BCMA_CORE_INTERNAL_MEM;
- ci->c_inf[2].base = 0x18004000;
- ci->c_inf[2].wrapbase = 0x18104000;
- ci->c_inf[2].cib = 0x08080401;
- ci->c_inf[3].id = BCMA_CORE_ARM_CM3;
- ci->c_inf[3].base = 0x18003000;
- ci->c_inf[3].wrapbase = 0x18103000;
- ci->c_inf[3].cib = 0x03004211;
- ci->c_inf[4].id = BCMA_CORE_80211;
- ci->c_inf[4].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[4].wrapbase = ci->c_inf[4].base + 0x00100000;
- ci->ramsize = 0x3C000;
- break;
- case BCM4339_CHIP_ID:
- ci->c_inf[0].wrapbase = 0x18100000;
- ci->c_inf[0].cib = 0x2e084411;
- ci->c_inf[1].id = BCMA_CORE_SDIO_DEV;
- ci->c_inf[1].base = 0x18005000;
- ci->c_inf[1].wrapbase = 0x18105000;
- ci->c_inf[1].cib = 0x15004211;
- ci->c_inf[2].id = BCMA_CORE_ARM_CR4;
- ci->c_inf[2].base = 0x18002000;
- ci->c_inf[2].wrapbase = 0x18102000;
- ci->c_inf[2].cib = 0x04084411;
- ci->c_inf[3].id = BCMA_CORE_80211;
- ci->c_inf[3].base = BCM43xx_CORE_D11_BASE;
- ci->c_inf[3].wrapbase = ci->c_inf[3].base + 0x00100000;
- ci->ramsize = 0xc0000;
- ci->rambase = 0x180000;
- break;
- default:
- brcmf_err("AXI chip is not supported\n");
- return -ENODEV;
- }
- } else {
- brcmf_err("chip backplane type %u is not supported\n",
- socitype);
- return -ENODEV;
- }
-
- return brcmf_sdio_chip_cichk(ci);
-}
-
-static int
-brcmf_sdio_chip_buscoreprep(struct brcmf_sdio_dev *sdiodev)
-{
- int err = 0;
- u8 clkval, clkset;
-
- /* Try forcing SDIO core to do ALPAvail request only */
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_ALP_AVAIL_REQ;
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
- if (err) {
- brcmf_err("error writing for HT off\n");
- return err;
- }
-
- /* If register supported, wait for ALPAvail and then force ALP */
- /* This may take up to 15 milliseconds */
- clkval = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL);
-
- if ((clkval & ~SBSDIO_AVBITS) != clkset) {
- brcmf_err("ChipClkCSR access: wrote 0x%02x read 0x%02x\n",
- clkset, clkval);
- return -EACCES;
- }
-
- SPINWAIT(((clkval = brcmf_sdiod_regrb(sdiodev,
- SBSDIO_FUNC1_CHIPCLKCSR, NULL)),
- !SBSDIO_ALPAV(clkval)),
- PMU_MAX_TRANSITION_DLY);
- if (!SBSDIO_ALPAV(clkval)) {
- brcmf_err("timeout on ALPAV wait, clkval 0x%02x\n",
- clkval);
- return -EBUSY;
- }
-
- clkset = SBSDIO_FORCE_HW_CLKREQ_OFF | SBSDIO_FORCE_ALP;
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, clkset, &err);
- udelay(65);
-
- /* Also, disable the extra SDIO pull-ups */
- brcmf_sdiod_regwb(sdiodev, SBSDIO_FUNC1_SDIOPULLUP, 0, NULL);
-
- return 0;
-}
-
-static void
-brcmf_sdio_chip_buscoresetup(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u32 base = ci->c_inf[0].base;
-
- /* get chipcommon rev */
- ci->c_inf[0].rev = ci->corerev(sdiodev, ci, ci->c_inf[0].id);
-
- /* get chipcommon capabilites */
- ci->c_inf[0].caps = brcmf_sdiod_regrl(sdiodev,
- CORE_CC_REG(base, capabilities),
- NULL);
-
- /* get pmu caps & rev */
- if (ci->c_inf[0].caps & CC_CAP_PMU) {
- ci->pmucaps =
- brcmf_sdiod_regrl(sdiodev,
- CORE_CC_REG(base, pmucapabilities),
- NULL);
- ci->pmurev = ci->pmucaps & PCAP_REV_MASK;
- }
-
- ci->c_inf[1].rev = ci->corerev(sdiodev, ci, ci->c_inf[1].id);
-
- brcmf_dbg(INFO, "ccrev=%d, pmurev=%d, buscore rev/type=%d/0x%x\n",
- ci->c_inf[0].rev, ci->pmurev,
- ci->c_inf[1].rev, ci->c_inf[1].id);
-
- /*
- * Make sure any on-chip ARM is off (in case strapping is wrong),
- * or downloaded code was already running.
- */
- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
-}
-
-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip **ci_ptr)
-{
- int ret;
- struct brcmf_chip *ci;
-
- brcmf_dbg(TRACE, "Enter\n");
-
- ci = kzalloc(sizeof(*ci), GFP_ATOMIC);
- if (!ci)
- return -ENOMEM;
-
- ret = brcmf_sdio_chip_buscoreprep(sdiodev);
- if (ret != 0)
- goto err;
-
- ret = brcmf_sdio_chip_recognition(sdiodev, ci);
- if (ret != 0)
- goto err;
-
- brcmf_sdio_chip_buscoresetup(sdiodev, ci);
-
- brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopullup),
- 0, NULL);
- brcmf_sdiod_regwl(sdiodev, CORE_CC_REG(ci->c_inf[0].base, gpiopulldown),
- 0, NULL);
-
- *ci_ptr = ci;
- return 0;
-
-err:
- kfree(ci);
- return ret;
-}
-
-void
-brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr)
-{
- brcmf_dbg(TRACE, "Enter\n");
-
- kfree(*ci_ptr);
- *ci_ptr = NULL;
-}
-
-static char *brcmf_sdio_chip_name(uint chipid, char *buf, uint len)
-{
- const char *fmt;
-
- fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? "%d" : "%x";
- snprintf(buf, len, fmt, chipid);
- return buf;
-}
-
-void
-brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 drivestrength)
-{
- const struct sdiod_drive_str *str_tab = NULL;
- u32 str_mask;
- u32 str_shift;
- char chn[8];
- u32 base = ci->c_inf[0].base;
- u32 i;
- u32 drivestrength_sel = 0;
- u32 cc_data_temp;
- u32 addr;
-
- if (!(ci->c_inf[0].caps & CC_CAP_PMU))
- return;
-
- switch (SDIOD_DRVSTR_KEY(ci->chip, ci->pmurev)) {
- case SDIOD_DRVSTR_KEY(BCM4330_CHIP_ID, 12):
- str_tab = sdiod_drvstr_tab1_1v8;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- case SDIOD_DRVSTR_KEY(BCM4334_CHIP_ID, 17):
- str_tab = sdiod_drvstr_tab6_1v8;
- str_mask = 0x00001800;
- str_shift = 11;
- break;
- case SDIOD_DRVSTR_KEY(BCM43143_CHIP_ID, 17):
- /* note: 43143 does not support tristate */
- i = ARRAY_SIZE(sdiod_drvstr_tab2_3v3) - 1;
- if (drivestrength >= sdiod_drvstr_tab2_3v3[i].strength) {
- str_tab = sdiod_drvstr_tab2_3v3;
- str_mask = 0x00000007;
- str_shift = 0;
- } else
- brcmf_err("Invalid SDIO Drive strength for chip %s, strength=%d\n",
- brcmf_sdio_chip_name(ci->chip, chn, 8),
- drivestrength);
- break;
- case SDIOD_DRVSTR_KEY(BCM43362_CHIP_ID, 13):
- str_tab = sdiod_drive_strength_tab5_1v8;
- str_mask = 0x00003800;
- str_shift = 11;
- break;
- default:
- brcmf_err("No SDIO Drive strength init done for chip %s rev %d pmurev %d\n",
- brcmf_sdio_chip_name(ci->chip, chn, 8),
- ci->chiprev, ci->pmurev);
- break;
- }
-
- if (str_tab != NULL) {
- for (i = 0; str_tab[i].strength != 0; i++) {
- if (drivestrength >= str_tab[i].strength) {
- drivestrength_sel = str_tab[i].sel;
- break;
- }
- }
- addr = CORE_CC_REG(base, chipcontrol_addr);
- brcmf_sdiod_regwl(sdiodev, addr, 1, NULL);
- cc_data_temp = brcmf_sdiod_regrl(sdiodev, addr, NULL);
- cc_data_temp &= ~str_mask;
- drivestrength_sel <<= str_shift;
- cc_data_temp |= drivestrength_sel;
- brcmf_sdiod_regwl(sdiodev, addr, cc_data_temp, NULL);
-
- brcmf_dbg(INFO, "SDIO: %d mA (req=%d mA) drive strength selected, set to 0x%08x\n",
- str_tab[i].strength, drivestrength, cc_data_temp);
- }
-}
-
-static void
-brcmf_sdio_chip_cm3_enterdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- ci->coredisable(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0);
- ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
- D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
- D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
- ci->resetcore(sdiodev, ci, BCMA_CORE_INTERNAL_MEM, 0, 0, 0);
-}
-
-static bool brcmf_sdio_chip_cm3_exitdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u8 core_idx;
- u32 reg_addr;
-
- if (!ci->iscoreup(sdiodev, ci, BCMA_CORE_INTERNAL_MEM)) {
- brcmf_err("SOCRAM core is down after reset?\n");
- return false;
- }
-
- /* clear all interrupts */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
- reg_addr = ci->c_inf[core_idx].base;
- reg_addr += offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
-
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CM3, 0, 0, 0);
-
- return true;
-}
-
-static inline void
-brcmf_sdio_chip_cr4_enterdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u8 idx;
- u32 regdata;
- u32 wrapbase;
- idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CR4);
-
- if (idx == BRCMF_MAX_CORENUM)
- return;
-
- wrapbase = ci->c_inf[idx].wrapbase;
- regdata = brcmf_sdiod_regrl(sdiodev, wrapbase + BCMA_IOCTL, NULL);
- regdata &= ARMCR4_BCMA_IOCTL_CPUHALT;
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, regdata,
- ARMCR4_BCMA_IOCTL_CPUHALT, ARMCR4_BCMA_IOCTL_CPUHALT);
- ci->resetcore(sdiodev, ci, BCMA_CORE_80211,
- D11_BCMA_IOCTL_PHYRESET | D11_BCMA_IOCTL_PHYCLOCKEN,
- D11_BCMA_IOCTL_PHYCLOCKEN, D11_BCMA_IOCTL_PHYCLOCKEN);
-}
-
-static bool brcmf_sdio_chip_cr4_exitdl(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 rstvec)
-{
- u8 core_idx;
- u32 reg_addr;
-
- /* clear all interrupts */
- core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_SDIO_DEV);
- reg_addr = ci->c_inf[core_idx].base;
- reg_addr += offsetof(struct sdpcmd_regs, intstatus);
- brcmf_sdiod_regwl(sdiodev, reg_addr, 0xFFFFFFFF, NULL);
-
- /* Write reset vector to address 0 */
- brcmf_sdiod_ramrw(sdiodev, true, 0, (void *)&rstvec,
- sizeof(rstvec));
-
- /* restore ARM */
- ci->resetcore(sdiodev, ci, BCMA_CORE_ARM_CR4, ARMCR4_BCMA_IOCTL_CPUHALT,
- 0, 0);
-
- return true;
-}
-
-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci)
-{
- u8 arm_core_idx;
-
- arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
- if (BRCMF_MAX_CORENUM != arm_core_idx) {
- brcmf_sdio_chip_cm3_enterdl(sdiodev, ci);
- return;
- }
-
- brcmf_sdio_chip_cr4_enterdl(sdiodev, ci);
-}
-
-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 rstvec)
-{
- u8 arm_core_idx;
-
- arm_core_idx = brcmf_sdio_chip_getinfidx(ci, BCMA_CORE_ARM_CM3);
- if (BRCMF_MAX_CORENUM != arm_core_idx)
- return brcmf_sdio_chip_cm3_exitdl(sdiodev, ci);
-
- return brcmf_sdio_chip_cr4_exitdl(sdiodev, ci, rstvec);
-}
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
deleted file mode 100644
index fb06143..0000000
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_chip.h
+++ /dev/null
@@ -1,231 +0,0 @@
-/*
- * Copyright (c) 2011 Broadcom Corporation
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
- * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
- * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
- * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-#ifndef _BRCMFMAC_SDIO_CHIP_H_
-#define _BRCMFMAC_SDIO_CHIP_H_
-
-/*
- * Core reg address translation.
- * Both macro's returns a 32 bits byte address on the backplane bus.
- */
-#define CORE_CC_REG(base, field) \
- (base + offsetof(struct chipcregs, field))
-#define CORE_BUS_REG(base, field) \
- (base + offsetof(struct sdpcmd_regs, field))
-#define CORE_SB(base, field) \
- (base + SBCONFIGOFF + offsetof(struct sbconfig, field))
-
-/* SDIO function 1 register CHIPCLKCSR */
-/* Force ALP request to backplane */
-#define SBSDIO_FORCE_ALP 0x01
-/* Force HT request to backplane */
-#define SBSDIO_FORCE_HT 0x02
-/* Force ILP request to backplane */
-#define SBSDIO_FORCE_ILP 0x04
-/* Make ALP ready (power up xtal) */
-#define SBSDIO_ALP_AVAIL_REQ 0x08
-/* Make HT ready (power up PLL) */
-#define SBSDIO_HT_AVAIL_REQ 0x10
-/* Squelch clock requests from HW */
-#define SBSDIO_FORCE_HW_CLKREQ_OFF 0x20
-/* Status: ALP is ready */
-#define SBSDIO_ALP_AVAIL 0x40
-/* Status: HT is ready */
-#define SBSDIO_HT_AVAIL 0x80
-#define SBSDIO_AVBITS (SBSDIO_HT_AVAIL | SBSDIO_ALP_AVAIL)
-#define SBSDIO_ALPAV(regval) ((regval) & SBSDIO_AVBITS)
-#define SBSDIO_HTAV(regval) (((regval) & SBSDIO_AVBITS) == SBSDIO_AVBITS)
-#define SBSDIO_ALPONLY(regval) (SBSDIO_ALPAV(regval) && !SBSDIO_HTAV(regval))
-#define SBSDIO_CLKAV(regval, alponly) \
- (SBSDIO_ALPAV(regval) && (alponly ? 1 : SBSDIO_HTAV(regval)))
-
-#define BRCMF_MAX_CORENUM 6
-
-struct brcmf_core {
- u16 id;
- u16 rev;
- u32 base;
- u32 wrapbase;
- u32 caps;
- u32 cib;
-};
-
-struct brcmf_chip {
- u32 chip;
- u32 chiprev;
- /* core info */
- /* always put chipcommon core at 0, bus core at 1 */
- struct brcmf_core c_inf[BRCMF_MAX_CORENUM];
- u32 pmurev;
- u32 pmucaps;
- u32 ramsize;
- u32 rambase;
- u32 rst_vec; /* reset vertor for ARM CR4 core */
-
- bool (*iscoreup)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
- u16 coreid);
- u32 (*corerev)(struct brcmf_sdio_dev *sdiodev, struct brcmf_chip *ci,
- u16 coreid);
- void (*coredisable)(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits);
- void (*resetcore)(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u16 coreid, u32 pre_resetbits,
- u32 in_resetbits, u32 post_resetbits);
-};
-
-struct sbconfig {
- u32 PAD[2];
- u32 sbipsflag; /* initiator port ocp slave flag */
- u32 PAD[3];
- u32 sbtpsflag; /* target port ocp slave flag */
- u32 PAD[11];
- u32 sbtmerrloga; /* (sonics >= 2.3) */
- u32 PAD;
- u32 sbtmerrlog; /* (sonics >= 2.3) */
- u32 PAD[3];
- u32 sbadmatch3; /* address match3 */
- u32 PAD;
- u32 sbadmatch2; /* address match2 */
- u32 PAD;
- u32 sbadmatch1; /* address match1 */
- u32 PAD[7];
- u32 sbimstate; /* initiator agent state */
- u32 sbintvec; /* interrupt mask */
- u32 sbtmstatelow; /* target state */
- u32 sbtmstatehigh; /* target state */
- u32 sbbwa0; /* bandwidth allocation table0 */
- u32 PAD;
- u32 sbimconfiglow; /* initiator configuration */
- u32 sbimconfighigh; /* initiator configuration */
- u32 sbadmatch0; /* address match0 */
- u32 PAD;
- u32 sbtmconfiglow; /* target configuration */
- u32 sbtmconfighigh; /* target configuration */
- u32 sbbconfig; /* broadcast configuration */
- u32 PAD;
- u32 sbbstate; /* broadcast state */
- u32 PAD[3];
- u32 sbactcnfg; /* activate configuration */
- u32 PAD[3];
- u32 sbflagst; /* current sbflags */
- u32 PAD[3];
- u32 sbidlow; /* identification */
- u32 sbidhigh; /* identification */
-};
-
-/* sdio core registers */
-struct sdpcmd_regs {
- u32 corecontrol; /* 0x00, rev8 */
- u32 corestatus; /* rev8 */
- u32 PAD[1];
- u32 biststatus; /* rev8 */
-
- /* PCMCIA access */
- u16 pcmciamesportaladdr; /* 0x010, rev8 */
- u16 PAD[1];
- u16 pcmciamesportalmask; /* rev8 */
- u16 PAD[1];
- u16 pcmciawrframebc; /* rev8 */
- u16 PAD[1];
- u16 pcmciaunderflowtimer; /* rev8 */
- u16 PAD[1];
-
- /* interrupt */
- u32 intstatus; /* 0x020, rev8 */
- u32 hostintmask; /* rev8 */
- u32 intmask; /* rev8 */
- u32 sbintstatus; /* rev8 */
- u32 sbintmask; /* rev8 */
- u32 funcintmask; /* rev4 */
- u32 PAD[2];
- u32 tosbmailbox; /* 0x040, rev8 */
- u32 tohostmailbox; /* rev8 */
- u32 tosbmailboxdata; /* rev8 */
- u32 tohostmailboxdata; /* rev8 */
-
- /* synchronized access to registers in SDIO clock domain */
- u32 sdioaccess; /* 0x050, rev8 */
- u32 PAD[3];
-
- /* PCMCIA frame control */
- u8 pcmciaframectrl; /* 0x060, rev8 */
- u8 PAD[3];
- u8 pcmciawatermark; /* rev8 */
- u8 PAD[155];
-
- /* interrupt batching control */
- u32 intrcvlazy; /* 0x100, rev8 */
- u32 PAD[3];
-
- /* counters */
- u32 cmd52rd; /* 0x110, rev8 */
- u32 cmd52wr; /* rev8 */
- u32 cmd53rd; /* rev8 */
- u32 cmd53wr; /* rev8 */
- u32 abort; /* rev8 */
- u32 datacrcerror; /* rev8 */
- u32 rdoutofsync; /* rev8 */
- u32 wroutofsync; /* rev8 */
- u32 writebusy; /* rev8 */
- u32 readwait; /* rev8 */
- u32 readterm; /* rev8 */
- u32 writeterm; /* rev8 */
- u32 PAD[40];
- u32 clockctlstatus; /* rev8 */
- u32 PAD[7];
-
- u32 PAD[128]; /* DMA engines */
-
- /* SDIO/PCMCIA CIS region */
- char cis[512]; /* 0x400-0x5ff, rev6 */
-
- /* PCMCIA function control registers */
- char pcmciafcr[256]; /* 0x600-6ff, rev6 */
- u16 PAD[55];
-
- /* PCMCIA backplane access */
- u16 backplanecsr; /* 0x76E, rev6 */
- u16 backplaneaddr0; /* rev6 */
- u16 backplaneaddr1; /* rev6 */
- u16 backplaneaddr2; /* rev6 */
- u16 backplaneaddr3; /* rev6 */
- u16 backplanedata0; /* rev6 */
- u16 backplanedata1; /* rev6 */
- u16 backplanedata2; /* rev6 */
- u16 backplanedata3; /* rev6 */
- u16 PAD[31];
-
- /* sprom "size" & "blank" info */
- u16 spromstatus; /* 0x7BE, rev2 */
- u32 PAD[464];
-
- u16 PAD[0x80];
-};
-
-int brcmf_sdio_chip_attach(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip **ci_ptr);
-void brcmf_sdio_chip_detach(struct brcmf_chip **ci_ptr);
-void brcmf_sdio_chip_drivestrengthinit(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci,
- u32 drivestrength);
-u8 brcmf_sdio_chip_getinfidx(struct brcmf_chip *ci, u16 coreid);
-void brcmf_sdio_chip_enter_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci);
-bool brcmf_sdio_chip_exit_download(struct brcmf_sdio_dev *sdiodev,
- struct brcmf_chip *ci, u32 rstvec);
-
-#endif /* _BRCMFMAC_SDIO_CHIP_H_ */
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
index 092e9c8..5e53eb1 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/sdio_host.h
@@ -182,6 +182,95 @@ struct brcmf_sdio_dev {
uint max_segment_size;
};
+/* sdio core registers */
+struct sdpcmd_regs {
+ u32 corecontrol; /* 0x00, rev8 */
+ u32 corestatus; /* rev8 */
+ u32 PAD[1];
+ u32 biststatus; /* rev8 */
+
+ /* PCMCIA access */
+ u16 pcmciamesportaladdr; /* 0x010, rev8 */
+ u16 PAD[1];
+ u16 pcmciamesportalmask; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciawrframebc; /* rev8 */
+ u16 PAD[1];
+ u16 pcmciaunderflowtimer; /* rev8 */
+ u16 PAD[1];
+
+ /* interrupt */
+ u32 intstatus; /* 0x020, rev8 */
+ u32 hostintmask; /* rev8 */
+ u32 intmask; /* rev8 */
+ u32 sbintstatus; /* rev8 */
+ u32 sbintmask; /* rev8 */
+ u32 funcintmask; /* rev4 */
+ u32 PAD[2];
+ u32 tosbmailbox; /* 0x040, rev8 */
+ u32 tohostmailbox; /* rev8 */
+ u32 tosbmailboxdata; /* rev8 */
+ u32 tohostmailboxdata; /* rev8 */
+
+ /* synchronized access to registers in SDIO clock domain */
+ u32 sdioaccess; /* 0x050, rev8 */
+ u32 PAD[3];
+
+ /* PCMCIA frame control */
+ u8 pcmciaframectrl; /* 0x060, rev8 */
+ u8 PAD[3];
+ u8 pcmciawatermark; /* rev8 */
+ u8 PAD[155];
+
+ /* interrupt batching control */
+ u32 intrcvlazy; /* 0x100, rev8 */
+ u32 PAD[3];
+
+ /* counters */
+ u32 cmd52rd; /* 0x110, rev8 */
+ u32 cmd52wr; /* rev8 */
+ u32 cmd53rd; /* rev8 */
+ u32 cmd53wr; /* rev8 */
+ u32 abort; /* rev8 */
+ u32 datacrcerror; /* rev8 */
+ u32 rdoutofsync; /* rev8 */
+ u32 wroutofsync; /* rev8 */
+ u32 writebusy; /* rev8 */
+ u32 readwait; /* rev8 */
+ u32 readterm; /* rev8 */
+ u32 writeterm; /* rev8 */
+ u32 PAD[40];
+ u32 clockctlstatus; /* rev8 */
+ u32 PAD[7];
+
+ u32 PAD[128]; /* DMA engines */
+
+ /* SDIO/PCMCIA CIS region */
+ char cis[512]; /* 0x400-0x5ff, rev6 */
+
+ /* PCMCIA function control registers */
+ char pcmciafcr[256]; /* 0x600-6ff, rev6 */
+ u16 PAD[55];
+
+ /* PCMCIA backplane access */
+ u16 backplanecsr; /* 0x76E, rev6 */
+ u16 backplaneaddr0; /* rev6 */
+ u16 backplaneaddr1; /* rev6 */
+ u16 backplaneaddr2; /* rev6 */
+ u16 backplaneaddr3; /* rev6 */
+ u16 backplanedata0; /* rev6 */
+ u16 backplanedata1; /* rev6 */
+ u16 backplanedata2; /* rev6 */
+ u16 backplanedata3; /* rev6 */
+ u16 PAD[31];
+
+ /* sprom "size" & "blank" info */
+ u16 spromstatus; /* 0x7BE, rev2 */
+ u32 PAD[464];
+
+ u16 PAD[0x80];
+};
+
/* Register/deregister interrupt handler. */
int brcmf_sdiod_intr_register(struct brcmf_sdio_dev *sdiodev);
int brcmf_sdiod_intr_unregister(struct brcmf_sdio_dev *sdiodev);
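
When a register map like sdpcmd_regs is moved between headers, the offsets recorded in its comments (0x020 for intstatus, 0x100 for intrcvlazy, ...) are easy to break with a stray padding change. A compile-time anchor check on a reduced toy copy of the struct shows one way such drift can be guarded against; this is not the full map and not something the driver does itself:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct toy_sdpcmd_regs {
        uint32_t corecontrol;       /* 0x000 */
        uint32_t corestatus;
        uint32_t pad0[1];
        uint32_t biststatus;
        uint16_t pcmcia[8];         /* 0x010..0x01f */
        uint32_t intstatus;         /* 0x020 */
    };

    _Static_assert(offsetof(struct toy_sdpcmd_regs, intstatus) == 0x20,
                   "intstatus must sit at backplane offset 0x20");

    int main(void)
    {
        printf("intstatus offset: 0x%zx\n",
               offsetof(struct toy_sdpcmd_regs, intstatus));
        return 0;
    }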
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index d7718a5..a54db91 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -351,13 +351,11 @@ u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
* triples, returning a pointer to the substring whose first element
* matches tag
*/
-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
+const struct brcmf_tlv *
+brcmf_parse_tlvs(const void *buf, int buflen, uint key)
{
- struct brcmf_tlv *elt;
- int totlen;
-
- elt = (struct brcmf_tlv *)buf;
- totlen = buflen;
+ const struct brcmf_tlv *elt = buf;
+ int totlen = buflen;
/* find tagged parameter */
while (totlen >= TLV_HDR_LEN) {
@@ -378,8 +376,8 @@ struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key)
* not update the tlvs buffer pointer/length.
*/
static bool
-brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
- u8 *oui, u32 oui_len, u8 type)
+brcmf_tlv_has_ie(const u8 *ie, const u8 **tlvs, u32 *tlvs_len,
+ const u8 *oui, u32 oui_len, u8 type)
{
/* If the contents match the OUI and the type */
if (ie[TLV_LEN_OFF] >= oui_len + 1 &&
@@ -401,12 +399,12 @@ brcmf_tlv_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len,
}
static struct brcmf_vs_tlv *
-brcmf_find_wpaie(u8 *parse, u32 len)
+brcmf_find_wpaie(const u8 *parse, u32 len)
{
- struct brcmf_tlv *ie;
+ const struct brcmf_tlv *ie;
while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
- if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
+ if (brcmf_tlv_has_ie((const u8 *)ie, &parse, &len,
WPA_OUI, TLV_OUI_LEN, WPA_OUI_TYPE))
return (struct brcmf_vs_tlv *)ie;
}
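
brcmf_parse_tlvs() walks the IE buffer as tag-length-value records, and the const-qualified signature introduced above reflects that the walk is read-only. A standalone sketch of that TLV scan over a hand-rolled buffer:

    #include <stdint.h>
    #include <stdio.h>

    #define TLV_HDR_LEN 2

    struct tlv { uint8_t id; uint8_t len; uint8_t data[]; };

    static const struct tlv *parse_tlvs(const void *buf, int buflen,
                                        unsigned int key)
    {
        const uint8_t *p = buf;

        while (buflen >= TLV_HDR_LEN) {
            int body = p[1];

            if (p[0] == key && buflen >= body + TLV_HDR_LEN)
                return (const struct tlv *)p;
            p += body + TLV_HDR_LEN;
            buflen -= body + TLV_HDR_LEN;
        }
        return NULL;                /* tag not present */
    }

    int main(void)
    {
        /* SSID IE (id 0, "ab") followed by RSN IE (id 48, 2 bytes) */
        static const uint8_t ies[] = { 0x00, 0x02, 'a', 'b',
                                       0x30, 0x02, 0x01, 0x00 };
        const struct tlv *rsn = parse_tlvs(ies, sizeof(ies), 48);

        if (rsn)
            printf("found RSN IE, len %u\n", (unsigned)rsn->len);
        return 0;
    }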
@@ -414,9 +412,9 @@ brcmf_find_wpaie(u8 *parse, u32 len)
}
static struct brcmf_vs_tlv *
-brcmf_find_wpsie(u8 *parse, u32 len)
+brcmf_find_wpsie(const u8 *parse, u32 len)
{
- struct brcmf_tlv *ie;
+ const struct brcmf_tlv *ie;
while ((ie = brcmf_parse_tlvs(parse, len, WLAN_EID_VENDOR_SPECIFIC))) {
if (brcmf_tlv_has_ie((u8 *)ie, &parse, &len,
@@ -1562,9 +1560,9 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
struct ieee80211_channel *chan = sme->channel;
struct brcmf_join_params join_params;
size_t join_params_size;
- struct brcmf_tlv *rsn_ie;
- struct brcmf_vs_tlv *wpa_ie;
- void *ie;
+ const struct brcmf_tlv *rsn_ie;
+ const struct brcmf_vs_tlv *wpa_ie;
+ const void *ie;
u32 ie_len;
struct brcmf_ext_join_params_le *ext_join_params;
u16 chanspec;
@@ -1591,7 +1589,8 @@ brcmf_cfg80211_connect(struct wiphy *wiphy, struct net_device *ndev,
ie_len = wpa_ie->len + TLV_HDR_LEN;
} else {
/* find the RSN_IE */
- rsn_ie = brcmf_parse_tlvs((u8 *)sme->ie, sme->ie_len,
+ rsn_ie = brcmf_parse_tlvs((const u8 *)sme->ie,
+ sme->ie_len,
WLAN_EID_RSN);
if (rsn_ie) {
ie = rsn_ie;
@@ -1981,7 +1980,9 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
if (!check_vif_up(ifp->vif))
return -EIO;
- if (mac_addr) {
+ if (mac_addr &&
+ (params->cipher != WLAN_CIPHER_SUITE_WEP40) &&
+ (params->cipher != WLAN_CIPHER_SUITE_WEP104)) {
brcmf_dbg(TRACE, "Exit");
return brcmf_add_keyext(wiphy, ndev, key_idx, mac_addr, params);
}
@@ -2164,6 +2165,8 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
s32 err = 0;
u8 *bssid = profile->bssid;
struct brcmf_sta_info_le sta_info_le;
+ u32 beacon_period;
+ u32 dtim_period;
brcmf_dbg(TRACE, "Enter, MAC %pM\n", mac);
if (!check_vif_up(ifp->vif))
@@ -2218,6 +2221,30 @@ brcmf_cfg80211_get_station(struct wiphy *wiphy, struct net_device *ndev,
sinfo->signal = rssi;
brcmf_dbg(CONN, "RSSI %d dBm\n", rssi);
}
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_BCNPRD,
+ &beacon_period);
+ if (err) {
+ brcmf_err("Could not get beacon period (%d)\n",
+ err);
+ goto done;
+ } else {
+ sinfo->bss_param.beacon_interval =
+ beacon_period;
+ brcmf_dbg(CONN, "Beacon peroid %d\n",
+ beacon_period);
+ }
+ err = brcmf_fil_cmd_int_get(ifp, BRCMF_C_GET_DTIMPRD,
+ &dtim_period);
+ if (err) {
+ brcmf_err("Could not get DTIM period (%d)\n",
+ err);
+ goto done;
+ } else {
+ sinfo->bss_param.dtim_period = dtim_period;
+ brcmf_dbg(CONN, "DTIM peroid %d\n",
+ dtim_period);
+ }
+ sinfo->filled |= STATION_INFO_BSS_PARAM;
}
} else
err = -EPERM;
@@ -2455,7 +2482,7 @@ static s32 brcmf_update_bss_info(struct brcmf_cfg80211_info *cfg,
struct brcmf_cfg80211_profile *profile = ndev_to_prof(ifp->ndev);
struct brcmf_bss_info_le *bi;
struct brcmf_ssid *ssid;
- struct brcmf_tlv *tim;
+ const struct brcmf_tlv *tim;
u16 beacon_interval;
u8 dtim_period;
size_t ie_len;
@@ -3220,8 +3247,9 @@ static bool brcmf_valid_wpa_oui(u8 *oui, bool is_rsn_ie)
}
static s32
-brcmf_configure_wpaie(struct net_device *ndev, struct brcmf_vs_tlv *wpa_ie,
- bool is_rsn_ie)
+brcmf_configure_wpaie(struct net_device *ndev,
+ const struct brcmf_vs_tlv *wpa_ie,
+ bool is_rsn_ie)
{
struct brcmf_if *ifp = netdev_priv(ndev);
u32 auth = 0; /* d11 open authentication */
@@ -3707,11 +3735,11 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
s32 ie_offset;
struct brcmf_cfg80211_info *cfg = wiphy_to_cfg(wiphy);
struct brcmf_if *ifp = netdev_priv(ndev);
- struct brcmf_tlv *ssid_ie;
+ const struct brcmf_tlv *ssid_ie;
struct brcmf_ssid_le ssid_le;
s32 err = -EPERM;
- struct brcmf_tlv *rsn_ie;
- struct brcmf_vs_tlv *wpa_ie;
+ const struct brcmf_tlv *rsn_ie;
+ const struct brcmf_vs_tlv *wpa_ie;
struct brcmf_join_params join_params;
enum nl80211_iftype dev_role;
struct brcmf_fil_bss_enable_le bss_enable;
@@ -4658,6 +4686,7 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
struct brcmf_cfg80211_info *cfg = ifp->drvr->config;
struct net_device *ndev = ifp->ndev;
struct brcmf_cfg80211_profile *profile = &ifp->vif->profile;
+ struct ieee80211_channel *chan;
s32 err = 0;
if (ifp->vif->mode == WL_MODE_AP) {
@@ -4665,9 +4694,10 @@ brcmf_notify_connect_status(struct brcmf_if *ifp,
} else if (brcmf_is_linkup(e)) {
brcmf_dbg(CONN, "Linkup\n");
if (brcmf_is_ibssmode(ifp->vif)) {
+ chan = ieee80211_get_channel(cfg->wiphy, cfg->channel);
memcpy(profile->bssid, e->addr, ETH_ALEN);
wl_inform_ibss(cfg, ndev, e->addr);
- cfg80211_ibss_joined(ndev, e->addr, GFP_KERNEL);
+ cfg80211_ibss_joined(ndev, e->addr, chan, GFP_KERNEL);
clear_bit(BRCMF_VIF_STATUS_CONNECTING,
&ifp->vif->sme_state);
set_bit(BRCMF_VIF_STATUS_CONNECTED,
@@ -5164,9 +5194,6 @@ static s32 brcmf_construct_reginfo(struct brcmf_cfg80211_info *cfg,
ieee80211_channel_to_frequency(ch.chnum, band);
band_chan_arr[index].hw_value = ch.chnum;
- brcmf_err("channel %d: f=%d bw=%d sb=%d\n",
- ch.chnum, band_chan_arr[index].center_freq,
- ch.bw, ch.sb);
if (ch.bw == BRCMU_CHAN_BW_40) {
/* assuming the order is HT20, HT40 Upper,
* HT40 lower from chanspecs
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
index 2dc6a07..254feed 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.h
@@ -491,7 +491,8 @@ void brcmf_free_vif(struct brcmf_cfg80211_vif *vif);
s32 brcmf_vif_set_mgmt_ie(struct brcmf_cfg80211_vif *vif, s32 pktflag,
const u8 *vndr_ie_buf, u32 vndr_ie_len);
s32 brcmf_vif_clear_mgmt_ies(struct brcmf_cfg80211_vif *vif);
-struct brcmf_tlv *brcmf_parse_tlvs(void *buf, int buflen, uint key);
+const struct brcmf_tlv *
+brcmf_parse_tlvs(const void *buf, int buflen, uint key);
u16 channel_to_chanspec(struct brcmu_d11inf *d11inf,
struct ieee80211_channel *ch);
u32 wl_get_vif_state_all(struct brcmf_cfg80211_info *cfg, unsigned long state);
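With brcmf_parse_tlvs() now taking and returning const pointers, callers such as brcmf_cfg80211_connect() can search caller-owned IE buffers (sme->ie above) without casting away const. The following is a minimal, self-contained sketch of the kind of TLV walk the helper performs; the ie_tlv layout and find_tlv() name are placeholders, not the driver's own definitions.

    #include <stdint.h>
    #include <stdio.h>

    struct ie_tlv {                 /* id/len header followed by payload, as in 802.11 IEs */
            uint8_t id;
            uint8_t len;
            uint8_t data[];
    };

    /* Return the first TLV with the given id, or NULL; never writes to the buffer. */
    static const struct ie_tlv *find_tlv(const void *buf, int buflen, uint8_t key)
    {
            const uint8_t *p = buf;

            while (buflen >= 2) {
                    const struct ie_tlv *tlv = (const struct ie_tlv *)p;
                    int tlv_len = 2 + tlv->len;

                    if (tlv_len > buflen)
                            break;          /* truncated element */
                    if (tlv->id == key)
                            return tlv;
                    p += tlv_len;
                    buflen -= tlv_len;
            }
            return NULL;
    }

    int main(void)
    {
            /* SSID(0)="ab", then RSN(0x30) with a 2-byte payload */
            const uint8_t ies[] = { 0x00, 0x02, 'a', 'b', 0x30, 0x02, 0x01, 0x00 };
            const struct ie_tlv *rsn = find_tlv(ies, sizeof(ies), 0x30 /* WLAN_EID_RSN */);

            printf("RSN IE %sfound, len=%d\n", rsn ? "" : "not ", rsn ? rsn->len : 0);
            return 0;
    }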
diff --git a/drivers/net/wireless/iwlegacy/3945-rs.c b/drivers/net/wireless/iwlegacy/3945-rs.c
index 9a45f6f..76b0729 100644
--- a/drivers/net/wireless/iwlegacy/3945-rs.c
+++ b/drivers/net/wireless/iwlegacy/3945-rs.c
@@ -891,8 +891,7 @@ il3945_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
{
}
-static struct rate_control_ops rs_ops = {
- .module = NULL,
+static const struct rate_control_ops rs_ops = {
.name = RS_NAME,
.tx_status = il3945_rs_tx_status,
.get_rate = il3945_rs_get_rate,
diff --git a/drivers/net/wireless/iwlegacy/4965-rs.c b/drivers/net/wireless/iwlegacy/4965-rs.c
index 4d5e332..eaaeea1 100644
--- a/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2807,8 +2807,7 @@ il4965_rs_rate_init_stub(void *il_r, struct ieee80211_supported_band *sband,
{
}
-static struct rate_control_ops rs_4965_ops = {
- .module = NULL,
+static const struct rate_control_ops rs_4965_ops = {
.name = IL4965_RS_NAME,
.tx_status = il4965_rs_tx_status,
.get_rate = il4965_rs_get_rate,
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 3eb2102..74b3b4d 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -68,6 +68,19 @@ config IWLWIFI_OPMODE_MODULAR
comment "WARNING: iwlwifi is useless without IWLDVM or IWLMVM"
depends on IWLWIFI && IWLDVM=n && IWLMVM=n
+config IWLWIFI_BCAST_FILTERING
+ bool "Enable broadcast filtering"
+ depends on IWLMVM
+ help
+ Say Y here to enable the default bcast filtering configuration.
+
+ Enabling broadcast filtering will drop any incoming wireless
+ broadcast frames, except those matching a few predefined
+ patterns (e.g. incoming ARP requests).
+
+ If unsure, don't enable this option, as some programs might
+ expect incoming broadcasts for their normal operation.
+
menu "Debugging Options"
depends on IWLWIFI
@@ -111,6 +124,7 @@ config IWLWIFI_DEBUG_EXPERIMENTAL_UCODE
Enable use of experimental ucode for testing and debugging.
config IWLWIFI_DEVICE_TRACING
+
bool "iwlwifi device access tracing"
depends on IWLWIFI
depends on EVENT_TRACING
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 1fa6442..3d32f41 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -8,7 +8,7 @@ iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-$(CONFIG_IWLDVM) += iwl-1000.o iwl-2000.o iwl-5000.o iwl-6000.o
-iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o
+iwlwifi-$(CONFIG_IWLMVM) += iwl-7000.o iwl-8000.o
iwlwifi-objs += $(iwlwifi-m)
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.c b/drivers/net/wireless/iwlwifi/dvm/rs.c
index 0977d93..aa773a2 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.c
@@ -176,46 +176,46 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
* (2.4 GHz) band.
*/
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
};
-static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202}, /* Norm */
{0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210}, /* SGI */
{0, 0, 0, 0, 47, 0, 91, 133, 171, 242, 305, 334, 362}, /* AGG */
{0, 0, 0, 0, 52, 0, 101, 145, 187, 264, 330, 361, 390}, /* AGG+SGI */
};
-static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
{0, 0, 0, 0, 94, 0, 177, 249, 313, 423, 512, 550, 586}, /* AGG */
{0, 0, 0, 0, 104, 0, 193, 270, 338, 454, 545, 584, 620}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 74, 0, 123, 155, 179, 214, 236, 244, 251}, /* Norm */
{0, 0, 0, 0, 81, 0, 131, 164, 188, 223, 243, 251, 257}, /* SGI */
{0, 0, 0, 0, 89, 0, 167, 235, 296, 402, 488, 526, 560}, /* AGG */
{0, 0, 0, 0, 97, 0, 182, 255, 320, 431, 520, 558, 593}, /* AGG+SGI*/
};
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
{0, 0, 0, 0, 171, 0, 305, 410, 496, 634, 731, 771, 805}, /* AGG */
{0, 0, 0, 0, 186, 0, 329, 439, 527, 667, 764, 803, 838}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
{0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
{0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
{0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
};
-static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
{0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
{0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
@@ -1111,7 +1111,7 @@ static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
struct iwl_scale_tbl_info *tbl)
{
/* Used to choose among HT tables */
- s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+ const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
/* Check for invalid LQ type */
if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
@@ -1173,9 +1173,8 @@ static s32 rs_get_best_rate(struct iwl_priv *priv,
&(lq_sta->lq_info[lq_sta->active_tbl]);
s32 active_sr = active_tbl->win[index].success_ratio;
s32 active_tpt = active_tbl->expected_tpt[index];
-
/* expected "search" throughput */
- s32 *tpt_tbl = tbl->expected_tpt;
+ const u16 *tpt_tbl = tbl->expected_tpt;
s32 new_rate, high, low, start_hi;
u16 high_low;
@@ -3319,8 +3318,8 @@ static void rs_rate_init_stub(void *priv_r, struct ieee80211_supported_band *sba
struct ieee80211_sta *sta, void *priv_sta)
{
}
-static struct rate_control_ops rs_ops = {
- .module = NULL,
+
+static const struct rate_control_ops rs_ops = {
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
diff --git a/drivers/net/wireless/iwlwifi/dvm/rs.h b/drivers/net/wireless/iwlwifi/dvm/rs.h
index bdd5644..f6bd25c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/dvm/rs.h
@@ -315,7 +315,7 @@ struct iwl_scale_tbl_info {
u8 is_dup; /* 1 = duplicated data streams */
u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
u8 max_search; /* maximum number of tables we can search */
- s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
u32 current_rate; /* rate_n_flags, uCode API format */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 2a59da2..fbd262f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -71,8 +71,8 @@
#define IWL3160_UCODE_API_MAX 8
/* Oldest version we won't warn about */
-#define IWL7260_UCODE_API_OK 7
-#define IWL3160_UCODE_API_OK 7
+#define IWL7260_UCODE_API_OK 8
+#define IWL3160_UCODE_API_OK 8
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 7
@@ -95,6 +95,8 @@
#define IWL7265_FW_PRE "iwlwifi-7265-"
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define NVM_HW_SECTION_NUM_FAMILY_7000 0
+
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
@@ -120,7 +122,8 @@ static const struct iwl_ht_params iwl7000_ht_params = {
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
.base_params = &iwl7000_base_params, \
- .led_mode = IWL_LED_RF_STATE
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000
const struct iwl_cfg iwl7260_2ac_cfg = {
@@ -194,6 +197,17 @@ const struct iwl_cfg iwl3160_n_cfg = {
.host_interrupt_operation_mode = true,
};
+static const struct iwl_pwr_tx_backoff iwl7265_pwr_tx_backoffs[] = {
+ {.pwr = 1600, .backoff = 0},
+ {.pwr = 1300, .backoff = 467},
+ {.pwr = 900, .backoff = 1900},
+ {.pwr = 800, .backoff = 2630},
+ {.pwr = 700, .backoff = 3720},
+ {.pwr = 600, .backoff = 5550},
+ {.pwr = 500, .backoff = 9350},
+ {0},
+};
+
const struct iwl_cfg iwl7265_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 7265",
.fw_name_pre = IWL7265_FW_PRE,
@@ -201,6 +215,7 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265_2n_cfg = {
@@ -210,6 +225,7 @@ const struct iwl_cfg iwl7265_2n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
const struct iwl_cfg iwl7265_n_cfg = {
@@ -219,6 +235,7 @@ const struct iwl_cfg iwl7265_n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7265_NVM_VERSION,
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+ .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs,
};
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
new file mode 100644
index 0000000..f5bd82b
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -0,0 +1,132 @@
+/******************************************************************************
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
+ * USA
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * BSD LICENSE
+ *
+ * Copyright(c) 2014 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *****************************************************************************/
+
+#include <linux/module.h>
+#include <linux/stringify.h>
+#include "iwl-config.h"
+#include "iwl-agn-hw.h"
+
+/* Highest firmware API version supported */
+#define IWL8000_UCODE_API_MAX 8
+
+/* Oldest version we won't warn about */
+#define IWL8000_UCODE_API_OK 8
+
+/* Lowest firmware API version supported */
+#define IWL8000_UCODE_API_MIN 8
+
+/* NVM versions */
+#define IWL8000_NVM_VERSION 0x0a1d
+#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */
+
+#define IWL8000_FW_PRE "iwlwifi-8000-"
+#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode"
+
+#define NVM_HW_SECTION_NUM_FAMILY_8000 10
+
+static const struct iwl_base_params iwl8000_base_params = {
+ .eeprom_size = OTP_LOW_IMAGE_SIZE,
+ .num_of_queues = IWLAGN_NUM_QUEUES,
+ .pll_cfg_val = 0,
+ .shadow_ram_support = true,
+ .led_compensation = 57,
+ .wd_timeout = IWL_LONG_WD_TIMEOUT,
+ .max_event_log_size = 512,
+ .shadow_reg_enable = true,
+ .pcie_l1_allowed = true,
+};
+
+static const struct iwl_ht_params iwl8000_ht_params = {
+ .ht40_bands = BIT(IEEE80211_BAND_2GHZ) | BIT(IEEE80211_BAND_5GHZ),
+};
+
+#define IWL_DEVICE_8000 \
+ .ucode_api_max = IWL8000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL8000_UCODE_API_OK, \
+ .ucode_api_min = IWL8000_UCODE_API_MIN, \
+ .device_family = IWL_DEVICE_FAMILY_8000, \
+ .max_inst_size = IWL60_RTC_INST_SIZE, \
+ .max_data_size = IWL60_RTC_DATA_SIZE, \
+ .base_params = &iwl8000_base_params, \
+ .led_mode = IWL_LED_RF_STATE, \
+ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000
+
+const struct iwl_cfg iwl8260_2ac_cfg = {
+ .name = "Intel(R) Dual Band Wireless AC 8260",
+ .fw_name_pre = IWL8000_FW_PRE,
+ IWL_DEVICE_8000,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl8260_n_cfg = {
+ .name = "Intel(R) Dual Band Wireless-AC 8260",
+ .fw_name_pre = IWL8000_FW_PRE,
+ IWL_DEVICE_8000,
+ .ht_params = &iwl8000_ht_params,
+ .nvm_ver = IWL8000_NVM_VERSION,
+ .nvm_calib_ver = IWL8000_TX_POWER_VERSION,
+};
+
+MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 1ced525..13ec566 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -84,6 +84,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_6050,
IWL_DEVICE_FAMILY_6150,
IWL_DEVICE_FAMILY_7000,
+ IWL_DEVICE_FAMILY_8000,
};
/*
@@ -192,6 +193,15 @@ struct iwl_eeprom_params {
bool enhanced_txpower;
};
+/* Tx-backoff power threshold
+ * @pwr: The power limit in mW
+ * @backoff: The tx-backoff in usec
+ */
+struct iwl_pwr_tx_backoff {
+ u32 pwr;
+ u32 backoff;
+};
+
/**
* struct iwl_cfg
* @name: Official name of the device
@@ -217,6 +227,9 @@ struct iwl_eeprom_params {
* @high_temp: Is this NIC designated to be in high temperature.
* @host_interrupt_operation_mode: device needs host interrupt operation
* mode set
+ * @d0i3: device uses d0i3 instead of d3
+ * @nvm_hw_section_num: the ID of the HW NVM section
+ * @pwr_tx_backoffs: translation table between power limits and backoffs
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -247,6 +260,9 @@ struct iwl_cfg {
const bool internal_wimax_coex;
const bool host_interrupt_operation_mode;
bool high_temp;
+ bool d0i3;
+ u8 nvm_hw_section_num;
+ const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
};
/*
@@ -307,6 +323,8 @@ extern const struct iwl_cfg iwl3160_n_cfg;
extern const struct iwl_cfg iwl7265_2ac_cfg;
extern const struct iwl_cfg iwl7265_2n_cfg;
extern const struct iwl_cfg iwl7265_n_cfg;
+extern const struct iwl_cfg iwl8260_2ac_cfg;
+extern const struct iwl_cfg iwl8260_n_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
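The pwr_tx_backoffs field documented above points at a {0}-terminated table ordered by descending power limit (see iwl7265_pwr_tx_backoffs earlier in this patch); the consumer is not part of this excerpt, but the intent is to pick the backoff of the first entry at or below the platform's power limit. A hedged user-space sketch of that lookup, assuming exactly that convention:

    #include <stdint.h>
    #include <stdio.h>

    struct iwl_pwr_tx_backoff {
            uint32_t pwr;           /* power limit in mW */
            uint32_t backoff;       /* tx backoff in usec */
    };

    /* Same ordering and {0} terminator as iwl7265_pwr_tx_backoffs in the patch. */
    static const struct iwl_pwr_tx_backoff backoffs[] = {
            { .pwr = 1600, .backoff = 0 },
            { .pwr = 1300, .backoff = 467 },
            { .pwr = 900,  .backoff = 1900 },
            { .pwr = 800,  .backoff = 2630 },
            { .pwr = 700,  .backoff = 3720 },
            { .pwr = 600,  .backoff = 5550 },
            { .pwr = 500,  .backoff = 9350 },
            { 0 },
    };

    /* First entry whose limit is not above the platform limit wins; 0 if none match. */
    static uint32_t min_backoff(uint32_t platform_limit_mw)
    {
            const struct iwl_pwr_tx_backoff *b;

            for (b = backoffs; b->pwr; b++)
                    if (platform_limit_mw >= b->pwr)
                            return b->backoff;
            return 0;
    }

    int main(void)
    {
            printf("limit 1600 mW -> backoff %u usec\n", min_backoff(1600));
            printf("limit 1000 mW -> backoff %u usec\n", min_backoff(1000));
            return 0;
    }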
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 9d32551..f13dec9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -395,38 +395,6 @@
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
-/* SECURE boot registers */
-#define CSR_SECURE_BOOT_CONFIG_ADDR (0x100)
-enum secure_boot_config_reg {
- CSR_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
- CSR_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
-};
-
-#define CSR_SECURE_BOOT_CPU1_STATUS_ADDR (0x100)
-#define CSR_SECURE_BOOT_CPU2_STATUS_ADDR (0x100)
-enum secure_boot_status_reg {
- CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000003,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
- CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
-};
-
-#define CSR_UCODE_LOAD_STATUS_ADDR (0x100)
-enum secure_load_status_reg {
- CSR_CPU_STATUS_LOADING_STARTED = 0x00000001,
- CSR_CPU_STATUS_LOADING_COMPLETED = 0x00000002,
- CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
- CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
-};
-
-#define CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
-#define CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
-
-#define CSR_SECURE_TIME_OUT (100)
-
-#define FH_TCSR_0_REG0 (0x1D00)
-
/*
* HBUS (Host-side Bus)
*
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index a75aac9..c8cbdbe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -126,6 +126,7 @@ do { \
/* 0x00000F00 - 0x00000100 */
#define IWL_DL_POWER 0x00000100
#define IWL_DL_TEMP 0x00000200
+#define IWL_DL_RPM 0x00000400
#define IWL_DL_SCAN 0x00000800
/* 0x0000F000 - 0x00001000 */
#define IWL_DL_ASSOC 0x00001000
@@ -189,5 +190,6 @@ do { \
#define IWL_DEBUG_RADIO(p, f, a...) IWL_DEBUG(p, IWL_DL_RADIO, f, ## a)
#define IWL_DEBUG_POWER(p, f, a...) IWL_DEBUG(p, IWL_DL_POWER, f, ## a)
#define IWL_DEBUG_11H(p, f, a...) IWL_DEBUG(p, IWL_DL_11H, f, ## a)
+#define IWL_DEBUG_RPM(p, f, a...) IWL_DEBUG(p, IWL_DL_RPM, f, ## a)
#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 7510355..bcfdcfb 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -128,7 +128,7 @@ struct iwl_drv {
const struct iwl_cfg *cfg;
int fw_index; /* firmware we're trying to load */
- char firmware_name[25]; /* name of firmware file to load */
+ char firmware_name[32]; /* name of firmware file to load */
struct completion request_firmware_complete;
@@ -237,7 +237,8 @@ static int iwl_request_firmware(struct iwl_drv *drv, bool first)
return -ENOENT;
}
- sprintf(drv->firmware_name, "%s%s%s", name_pre, tag, ".ucode");
+ snprintf(drv->firmware_name, sizeof(drv->firmware_name), "%s%s.ucode",
+ name_pre, tag);
IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
(drv->fw_index == UCODE_EXPERIMENTAL_INDEX)
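firmware_name is assembled from fw_name_pre, the API tag and the ".ucode" suffix, so with the new "iwlwifi-8000-" prefix the old unchecked sprintf() into 25 bytes left almost no headroom; the bounded snprintf() into 32 bytes above removes the risk. A small standalone check of the construction, with illustrative values only:

    #include <stdio.h>

    int main(void)
    {
            char firmware_name[32];
            const char *name_pre = "iwlwifi-8000-";  /* IWL8000_FW_PRE */
            const char *tag = "8";                   /* ucode API version as a string */
            int n;

            n = snprintf(firmware_name, sizeof(firmware_name), "%s%s.ucode",
                         name_pre, tag);
            printf("requesting '%s' (%d chars, needs %d bytes incl. NUL)\n",
                   firmware_name, n, n + 1);
            return 0;
    }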
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 5f1493c..f80ba58 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -95,6 +95,8 @@
* @IWL_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
* single bound interface).
* @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
+ * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
*/
enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_PAN = BIT(0),
@@ -119,6 +121,8 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_P2P_PS = BIT(21),
IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT = BIT(24),
IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD = BIT(26),
+ IWL_UCODE_TLV_FLAGS_BCAST_FILTERING = BIT(29),
+ IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
/* The default calibrate table size if not specified by firmware file */
@@ -160,8 +164,7 @@ enum iwl_ucode_sec {
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
-#define IWL_UCODE_SECTION_MAX 6
-#define IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU (IWL_UCODE_SECTION_MAX/2)
+#define IWL_UCODE_SECTION_MAX 12
struct iwl_ucode_capabilities {
u32 max_probe_length;
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.c b/drivers/net/wireless/iwlwifi/iwl-io.c
index f98175a..07372f2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.c
+++ b/drivers/net/wireless/iwlwifi/iwl-io.c
@@ -130,6 +130,21 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
}
IWL_EXPORT_SYMBOL(iwl_write_prph);
+int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
+ u32 bits, u32 mask, int timeout)
+{
+ int t = 0;
+
+ do {
+ if ((iwl_read_prph(trans, addr) & mask) == (bits & mask))
+ return t;
+ udelay(IWL_POLL_INTERVAL);
+ t += IWL_POLL_INTERVAL;
+ } while (t < timeout);
+
+ return -ETIMEDOUT;
+}
+
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
{
unsigned long flags;
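The new iwl_poll_prph_bit() follows the same contract as the existing poll helpers: re-read the register every IWL_POLL_INTERVAL microseconds until (value & mask) == (bits & mask) or the timeout expires, and return the time waited on success or -ETIMEDOUT otherwise. A user-space model of that loop, with a fake register read standing in for iwl_read_prph():

    #include <errno.h>
    #include <stdio.h>

    #define POLL_INTERVAL_US 10     /* stands in for IWL_POLL_INTERVAL */

    /* Fake register that raises a "done" bit once enough time has passed. */
    static unsigned int fake_reg_read(int elapsed_us)
    {
            return elapsed_us >= 40 ? 0x2 : 0x0;
    }

    /* Same contract as iwl_poll_prph_bit(): time waited on success, -ETIMEDOUT else. */
    static int poll_bit(unsigned int bits, unsigned int mask, int timeout_us)
    {
            int t = 0;

            do {
                    if ((fake_reg_read(t) & mask) == (bits & mask))
                            return t;
                    t += POLL_INTERVAL_US;  /* udelay(IWL_POLL_INTERVAL) in the kernel */
            } while (t < timeout_us);

            return -ETIMEDOUT;
    }

    int main(void)
    {
            printf("bit set after 40us: %d\n", poll_bit(0x2, 0x2, 100));
            printf("bit never set:      %d\n", poll_bit(0x4, 0x4, 100));
            return 0;
    }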
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index c339c1b..9e81b23 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -72,6 +72,8 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
+int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
+ u32 bits, u32 mask, int timeout);
void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
u32 bits, u32 mask);
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 725e954d..53b9cad 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -402,11 +402,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_sbands(dev, cfg, data, nvm_sw, sku & NVM_SKU_CAP_11AC_ENABLE,
tx_chains, rx_chains);
- data->calib_version = 255; /* TODO:
- this value will prevent some checks from
- failing, we need to check if this
- field is still needed, and if it does,
- where is it in the NVM*/
+ data->calib_version = 255;
return data;
}
diff --git a/drivers/net/wireless/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
index b5be51f..5d78207 100644
--- a/drivers/net/wireless/iwlwifi/iwl-op-mode.h
+++ b/drivers/net/wireless/iwlwifi/iwl-op-mode.h
@@ -131,6 +131,8 @@ struct iwl_cfg;
* @nic_config: configure NIC, called before firmware is started.
* May sleep
* @wimax_active: invoked when WiMax becomes active. May sleep
+ * @enter_d0i3: configure the fw to enter d0i3. May sleep.
+ * @exit_d0i3: configure the fw to exit d0i3. May sleep.
*/
struct iwl_op_mode_ops {
struct iwl_op_mode *(*start)(struct iwl_trans *trans,
@@ -148,6 +150,8 @@ struct iwl_op_mode_ops {
void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
void (*nic_config)(struct iwl_op_mode *op_mode);
void (*wimax_active)(struct iwl_op_mode *op_mode);
+ int (*enter_d0i3)(struct iwl_op_mode *op_mode);
+ int (*exit_d0i3)(struct iwl_op_mode *op_mode);
};
int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
@@ -155,7 +159,7 @@ void iwl_opmode_deregister(const char *name);
/**
* struct iwl_op_mode - operational mode
- * @ops - pointer to its own ops
+ * @ops: pointer to its own ops
*
* This holds an implementation of the mac80211 / fw API.
*/
@@ -226,4 +230,22 @@ static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
op_mode->ops->wimax_active(op_mode);
}
+static inline int iwl_op_mode_enter_d0i3(struct iwl_op_mode *op_mode)
+{
+ might_sleep();
+
+ if (!op_mode->ops->enter_d0i3)
+ return 0;
+ return op_mode->ops->enter_d0i3(op_mode);
+}
+
+static inline int iwl_op_mode_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+ might_sleep();
+
+ if (!op_mode->ops->exit_d0i3)
+ return 0;
+ return op_mode->ops->exit_d0i3(op_mode);
+}
+
#endif /* __iwl_op_mode_h__ */
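enter_d0i3/exit_d0i3 are wired up as optional callbacks: the inline wrappers above treat a NULL op as "nothing to do" and return 0, so op modes that never implement d0i3 keep working unchanged. A small standalone illustration of that optional-callback pattern; the names here are placeholders, not the driver's:

    #include <stddef.h>
    #include <stdio.h>

    struct ops {
            int (*enter_d0i3)(void *ctx);   /* optional */
            int (*exit_d0i3)(void *ctx);    /* optional */
    };

    static int do_enter_d0i3(const struct ops *ops, void *ctx)
    {
            if (!ops->enter_d0i3)
                    return 0;               /* unsupported: treated as success */
            return ops->enter_d0i3(ctx);
    }

    static int demo_enter_d0i3(void *ctx)
    {
            (void)ctx;
            printf("op mode: entering d0i3\n");
            return 0;
    }

    int main(void)
    {
            struct ops with = { .enter_d0i3 = demo_enter_d0i3 };
            struct ops without = { 0 };

            printf("with handler:    %d\n", do_enter_d0i3(&with, NULL));
            printf("without handler: %d\n", do_enter_d0i3(&without, NULL));
            return 0;
    }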
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
index fa77d63..b761ac4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
@@ -72,7 +72,7 @@
#include "iwl-trans.h"
#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
-#define IWL_NUM_PAPD_CH_GROUPS 4
+#define IWL_NUM_PAPD_CH_GROUPS 7
#define IWL_NUM_TXP_CH_GROUPS 9
struct iwl_phy_db_entry {
@@ -383,7 +383,7 @@ static int iwl_phy_db_send_all_channel_groups(
if (!entry)
return -EINVAL;
- if (WARN_ON_ONCE(!entry->size))
+ if (!entry->size)
continue;
/* Send the requested PHY DB section */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 100bd0d..9c90186 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -105,6 +105,13 @@
/* Device NMI register */
#define DEVICE_SET_NMI_REG 0x00a01c30
+/*
+ * Device reset for family 8000:
+ * write to bit 24 in order to reset the CPU.
+ */
+#define RELEASE_CPU_RESET (0x300C)
+#define RELEASE_CPU_RESET_BIT BIT(24)
+
/*****************************************************************************
* 7000/3000 series SHR DTS addresses *
*****************************************************************************/
@@ -281,4 +288,43 @@ static inline unsigned int SCD_QUEUE_STATUS_BITS(unsigned int chnl)
#define OSC_CLK (0xa04068)
#define OSC_CLK_FORCE_CONTROL (0x8)
+/* SECURE boot registers */
+#define LMPM_SECURE_BOOT_CONFIG_ADDR (0x100)
+enum secure_boot_config_reg {
+ LMPM_SECURE_BOOT_CONFIG_INSPECTOR_BURNED_IN_OTP = 0x00000001,
+ LMPM_SECURE_BOOT_CONFIG_INSPECTOR_NOT_REQ = 0x00000002,
+};
+
+#define LMPM_SECURE_BOOT_CPU1_STATUS_ADDR (0x1E30)
+#define LMPM_SECURE_BOOT_CPU2_STATUS_ADDR (0x1E34)
+enum secure_boot_status_reg {
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_STATUS = 0x00000001,
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED = 0x00000002,
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_SUCCESS = 0x00000004,
+ LMPM_SECURE_BOOT_CPU_STATUS_VERF_FAIL = 0x00000008,
+ LMPM_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS = 0x00000003,
+};
+
+#define CSR_UCODE_LOAD_STATUS_ADDR (0x1E70)
+enum secure_load_status_reg {
+ LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
+ LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003,
+ LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007,
+ LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
+ LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
+};
+
+#define LMPM_SECURE_INSPECTOR_CODE_ADDR (0x1E38)
+#define LMPM_SECURE_INSPECTOR_DATA_ADDR (0x1E3C)
+#define LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR (0x1E78)
+#define LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR (0x1E7C)
+
+#define LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE (0x400000)
+#define LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE (0x402000)
+#define LMPM_SECURE_CPU1_HDR_MEM_SPACE (0x420000)
+#define LMPM_SECURE_CPU2_HDR_MEM_SPACE (0x420400)
+
+#define LMPM_SECURE_TIME_OUT (100)
+
#endif /* __iwl_prph_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 1f065cf..7b19274 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -193,12 +193,23 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* @CMD_ASYNC: Return right away and don't wait for the response
* @CMD_WANT_SKB: valid only with CMD_SYNC. The caller needs the buffer of the
* response. The caller needs to call iwl_free_resp when done.
+ * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
+ * command queue, but after other high priority commands. Valid only
+ * with CMD_ASYNC.
+ * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
+ * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
+ * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
+ * (i.e. mark it as non-idle).
*/
enum CMD_MODE {
CMD_SYNC = 0,
CMD_ASYNC = BIT(0),
CMD_WANT_SKB = BIT(1),
CMD_SEND_IN_RFKILL = BIT(2),
+ CMD_HIGH_PRIO = BIT(3),
+ CMD_SEND_IN_IDLE = BIT(4),
+ CMD_MAKE_TRANS_IDLE = BIT(5),
+ CMD_WAKE_UP_TRANS = BIT(6),
};
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -335,6 +346,9 @@ enum iwl_d3_status {
* @STATUS_INT_ENABLED: interrupts are enabled
* @STATUS_RFKILL: the HW RFkill switch is in KILL position
* @STATUS_FW_ERROR: the fw is in error state
+ * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
+ * are sent
+ * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
*/
enum iwl_trans_status {
STATUS_SYNC_HCMD_ACTIVE,
@@ -343,6 +357,8 @@ enum iwl_trans_status {
STATUS_INT_ENABLED,
STATUS_RFKILL,
STATUS_FW_ERROR,
+ STATUS_TRANS_GOING_IDLE,
+ STATUS_TRANS_IDLE,
};
/**
@@ -443,6 +459,11 @@ struct iwl_trans;
* @release_nic_access: let the NIC go to sleep. The "flags" parameter
* must be the same one that was sent before to the grab_nic_access.
* @set_bits_mask: set SRAM register according to value and mask.
+ * @ref: grab a reference to the transport/FW layers, disallowing
+ * certain low power states
+ * @unref: release a reference previously taken with @ref. Note that
+ * initially the reference count is 1, making an initial @unref
+ * necessary to allow low power states.
*/
struct iwl_trans_ops {
@@ -489,6 +510,8 @@ struct iwl_trans_ops {
unsigned long *flags);
void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
u32 value);
+ void (*ref)(struct iwl_trans *trans);
+ void (*unref)(struct iwl_trans *trans);
};
/**
@@ -523,6 +546,7 @@ enum iwl_trans_state {
* starting the firmware, used for tracing
* @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
* start of the 802.11 header in the @rx_mpdu_cmd
+ * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
*/
struct iwl_trans {
const struct iwl_trans_ops *ops;
@@ -551,6 +575,8 @@ struct iwl_trans {
struct lockdep_map sync_cmd_lockdep_map;
#endif
+ u64 dflt_pwr_limit;
+
/* pointer to trans specific struct */
/*Ensure that this pointer will always be aligned to sizeof pointer */
char trans_specific[0] __aligned(sizeof(void *));
@@ -627,6 +653,18 @@ static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
return trans->ops->d3_resume(trans, status, test);
}
+static inline void iwl_trans_ref(struct iwl_trans *trans)
+{
+ if (trans->ops->ref)
+ trans->ops->ref(trans);
+}
+
+static inline void iwl_trans_unref(struct iwl_trans *trans)
+{
+ if (trans->ops->unref)
+ trans->ops->unref(trans);
+}
+
static inline int iwl_trans_send_cmd(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
{
diff --git a/drivers/net/wireless/iwlwifi/mvm/Makefile b/drivers/net/wireless/iwlwifi/mvm/Makefile
index f98ec2b..41d390f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/Makefile
+++ b/drivers/net/wireless/iwlwifi/mvm/Makefile
@@ -2,7 +2,7 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
-iwlmvm-y += power.o power_legacy.o bt-coex.o
+iwlmvm-y += power.o bt-coex.o
iwlmvm-y += led.o tt.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 76cde6c..38a54a3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -378,7 +378,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
flags = iwlwifi_mod_params.bt_coex_active ?
BT_COEX_NW : BT_COEX_DISABLE;
- flags |= BT_CH_PRIMARY_EN | BT_CH_SECONDARY_EN | BT_SYNC_2_BT_DISABLE;
bt_cmd->flags = cpu_to_le32(flags);
bt_cmd->valid_bit_msk = cpu_to_le32(BT_VALID_ENABLE |
@@ -399,6 +398,9 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
BT_VALID_TXRX_MAX_FREQ_0 |
BT_VALID_SYNC_TO_SCO);
+ if (IWL_MVM_BT_COEX_SYNC2SCO)
+ bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
+
if (mvm->cfg->bt_shared_single_ant)
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
sizeof(iwl_single_shared_ant));
@@ -489,8 +491,7 @@ static int iwl_mvm_bt_udpate_ctrl_kill_msk(struct iwl_mvm *mvm,
return ret;
}
-static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
- bool enable)
+int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
{
struct iwl_bt_coex_cmd *bt_cmd;
/* Send ASYNC since this can be sent from an atomic context */
@@ -500,25 +501,16 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
.dataflags = { IWL_HCMD_DFL_DUP, },
.flags = CMD_ASYNC,
};
-
- struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
int ret;
- if (sta_id == IWL_MVM_STATION_COUNT)
- return 0;
-
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
-
- /* This can happen if the station has been removed right now */
- if (IS_ERR_OR_NULL(sta))
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+ if (!mvmsta)
return 0;
- mvmsta = iwl_mvm_sta_from_mac80211(sta);
-
/* nothing to do */
- if (mvmsta->bt_reduced_txpower == enable)
+ if (mvmsta->bt_reduced_txpower_dbg ||
+ mvmsta->bt_reduced_txpower == enable)
return 0;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_ATOMIC);
@@ -552,6 +544,7 @@ struct iwl_bt_iterator_data {
bool reduced_tx_power;
struct ieee80211_chanctx_conf *primary;
struct ieee80211_chanctx_conf *secondary;
+ bool primary_ll;
};
static inline
@@ -577,72 +570,113 @@ static void iwl_mvm_bt_notif_iterator(void *_data, u8 *mac,
struct iwl_mvm *mvm = data->mvm;
struct ieee80211_chanctx_conf *chanctx_conf;
enum ieee80211_smps_mode smps_mode;
+ u32 bt_activity_grading;
int ave_rssi;
lockdep_assert_held(&mvm->mutex);
- if (vif->type != NL80211_IFTYPE_STATION &&
- vif->type != NL80211_IFTYPE_AP)
- return;
+ switch (vif->type) {
+ case NL80211_IFTYPE_STATION:
+ /* default smps_mode for BSS / P2P client is AUTOMATIC */
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ data->num_bss_ifaces++;
- smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ /*
+ * Count unassoc BSSes, relax SMPS constraints
+ * and disable reduced Tx Power
+ */
+ if (!vif->bss_conf.assoc) {
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ if (iwl_mvm_bt_coex_reduced_txp(mvm,
+ mvmvif->ap_sta_id,
+ false))
+ IWL_ERR(mvm, "Couldn't send BT_CONFIG cmd\n");
+ return;
+ }
+ break;
+ case NL80211_IFTYPE_AP:
+ /* default smps_mode for AP / GO is OFF */
+ smps_mode = IEEE80211_SMPS_OFF;
+ if (!mvmvif->ap_ibss_active) {
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ return;
+ }
+
+ /* the Ack / Cts kill mask must be default if AP / GO */
+ data->reduced_tx_power = false;
+ break;
+ default:
+ return;
+ }
chanctx_conf = rcu_dereference(vif->chanctx_conf);
/* If channel context is invalid or not on 2.4GHz .. */
if ((!chanctx_conf ||
chanctx_conf->def.chan->band != IEEE80211_BAND_2GHZ)) {
- /* ... and it is an associated STATION, relax constraints */
- if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc)
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
- smps_mode);
- iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
+ /* ... relax constraints and disable rssi events */
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX,
+ smps_mode);
+ if (vif->type == NL80211_IFTYPE_STATION)
+ iwl_mvm_bt_coex_enable_rssi_event(mvm, vif, false, 0);
return;
}
- /* SoftAP / GO will always be primary */
- if (vif->type == NL80211_IFTYPE_AP) {
- if (!mvmvif->ap_ibss_active)
- return;
+ bt_activity_grading = le32_to_cpu(data->notif->bt_activity_grading);
+ if (bt_activity_grading >= BT_HIGH_TRAFFIC)
+ smps_mode = IEEE80211_SMPS_STATIC;
+ else if (bt_activity_grading >= BT_LOW_TRAFFIC)
+ smps_mode = vif->type == NL80211_IFTYPE_AP ?
+ IEEE80211_SMPS_OFF :
+ IEEE80211_SMPS_DYNAMIC;
+ IWL_DEBUG_COEX(data->mvm,
+ "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
+ mvmvif->id, data->notif->bt_status, bt_activity_grading,
+ smps_mode);
- /* the Ack / Cts kill mask must be default if AP / GO */
- data->reduced_tx_power = false;
+ iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
- if (chanctx_conf == data->primary)
- return;
+ /* low latency is always primary */
+ if (iwl_mvm_vif_low_latency(mvmvif)) {
+ data->primary_ll = true;
- /* downgrade the current primary no matter what its type is */
data->secondary = data->primary;
data->primary = chanctx_conf;
- return;
}
- data->num_bss_ifaces++;
+ if (vif->type == NL80211_IFTYPE_AP) {
+ if (!mvmvif->ap_ibss_active)
+ return;
- /* we are now a STA / P2P Client, and take associated ones only */
- if (!vif->bss_conf.assoc)
+ if (chanctx_conf == data->primary)
+ return;
+
+ if (!data->primary_ll) {
+ /*
+ * downgrade the current primary no matter what its
+ * type is.
+ */
+ data->secondary = data->primary;
+ data->primary = chanctx_conf;
+ } else {
+ /* there is low latency vif - we will be secondary */
+ data->secondary = chanctx_conf;
+ }
return;
+ }
- /* STA / P2P Client, try to be primary if first vif */
+ /*
+ * STA / P2P Client, try to be primary if first vif. If we are in low
+ * latency mode, we are already in primary and just don't do much
+ */
if (!data->primary || data->primary == chanctx_conf)
data->primary = chanctx_conf;
else if (!data->secondary)
/* if secondary is not NULL, it might be a GO */
data->secondary = chanctx_conf;
- if (le32_to_cpu(data->notif->bt_activity_grading) >= BT_HIGH_TRAFFIC)
- smps_mode = IEEE80211_SMPS_STATIC;
- else if (le32_to_cpu(data->notif->bt_activity_grading) >=
- BT_LOW_TRAFFIC)
- smps_mode = IEEE80211_SMPS_DYNAMIC;
-
- IWL_DEBUG_COEX(data->mvm,
- "mac %d: bt_status %d bt_activity_grading %d smps_req %d\n",
- mvmvif->id, data->notif->bt_status,
- data->notif->bt_activity_grading, smps_mode);
-
- iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_BT_COEX, smps_mode);
-
/* don't reduce the Tx power if in loose scheme */
if (iwl_get_coex_type(mvm, vif) == BT_COEX_LOOSE_LUT ||
mvm->cfg->bt_shared_single_ant) {
diff --git a/drivers/net/wireless/iwlwifi/mvm/constants.h b/drivers/net/wireless/iwlwifi/mvm/constants.h
index 0368576..2d133b1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/constants.h
+++ b/drivers/net/wireless/iwlwifi/mvm/constants.h
@@ -78,5 +78,9 @@
#define IWL_MVM_PS_SNOOZE_INTERVAL 25
#define IWL_MVM_PS_SNOOZE_WINDOW 50
#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
+#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64
+#define IWL_MVM_LOWLAT_SINGLE_BINDING_MAXDUR 24 /* TU */
+#define IWL_MVM_LOWLAT_DUAL_BINDING_MAXDUR 24 /* TU */
+#define IWL_MVM_BT_COEX_SYNC2SCO 1
#endif /* __MVM_CONSTANTS_H */
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index f36a7ee..b956e2f 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -963,7 +963,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
};
int ret, i;
int len __maybe_unused;
- u8 old_aux_sta_id, old_ap_sta_id = IWL_MVM_STATION_COUNT;
if (!wowlan) {
/*
@@ -980,8 +979,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- old_aux_sta_id = mvm->aux_sta.sta_id;
-
/* see if there's only a single BSS vif and it's associated */
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
@@ -1067,16 +1064,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_stop_device(mvm->trans);
/*
- * The D3 firmware still hardcodes the AP station ID for the
- * BSS we're associated with as 0. Store the real STA ID here
- * and assign 0. When we leave this function, we'll restore
- * the original value for the resume code.
- */
- old_ap_sta_id = mvm_ap_sta->sta_id;
- mvm_ap_sta->sta_id = 0;
- mvmvif->ap_sta_id = 0;
-
- /*
* Set the HW restart bit -- this is mostly true as we're
* going to load new firmware and reprogram that, though
* the reprogramming is going to be manual to avoid adding
@@ -1096,16 +1083,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm->ptk_ivlen = 0;
mvm->ptk_icvlen = 0;
- /*
- * The D3 firmware still hardcodes the AP station ID for the
- * BSS we're associated with as 0. As a result, we have to move
- * the auxiliary station to ID 1 so the ID 0 remains free for
- * the AP station for later.
- * We set the sta_id to 1 here, and reset it to its previous
- * value (that we stored above) later.
- */
- mvm->aux_sta.sta_id = 1;
-
ret = iwl_mvm_load_d3_fw(mvm);
if (ret)
goto out;
@@ -1191,11 +1168,11 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
- ret = iwl_mvm_power_update_device_mode(mvm);
+ ret = iwl_mvm_power_update_device(mvm);
if (ret)
goto out;
- ret = iwl_mvm_power_update_mode(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
if (ret)
goto out;
@@ -1222,10 +1199,6 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
iwl_trans_d3_suspend(mvm->trans, test);
out:
- mvm->aux_sta.sta_id = old_aux_sta_id;
- mvm_ap_sta->sta_id = old_ap_sta_id;
- mvmvif->ap_sta_id = old_ap_sta_id;
-
if (ret < 0)
ieee80211_restart_hw(mvm->hw);
out_noreset:
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
index 0e29cd8..29b4396 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs-vif.c
@@ -185,7 +185,7 @@ static ssize_t iwl_dbgfs_pm_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_pm(mvm, vif, param, val);
- ret = iwl_mvm_power_update_mode(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -202,7 +202,7 @@ static ssize_t iwl_dbgfs_pm_params_read(struct file *file,
int bufsz = sizeof(buf);
int pos;
- pos = iwl_mvm_power_dbgfs_read(mvm, vif, buf, bufsz);
+ pos = iwl_mvm_power_mac_dbgfs_read(mvm, vif, buf, bufsz);
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
@@ -225,6 +225,29 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
ap_sta_id = mvmvif->ap_sta_id;
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_ADHOC:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: ibss\n");
+ break;
+ case NL80211_IFTYPE_STATION:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: bss\n");
+ break;
+ case NL80211_IFTYPE_AP:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: ap\n");
+ break;
+ case NL80211_IFTYPE_P2P_CLIENT:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p client\n");
+ break;
+ case NL80211_IFTYPE_P2P_GO:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p go\n");
+ break;
+ case NL80211_IFTYPE_P2P_DEVICE:
+ pos += scnprintf(buf+pos, bufsz-pos, "type: p2p dev\n");
+ break;
+ default:
+ break;
+ }
+
pos += scnprintf(buf+pos, bufsz-pos, "mac id/color: %d / %d\n",
mvmvif->id, mvmvif->color);
pos += scnprintf(buf+pos, bufsz-pos, "bssid: %pM\n",
@@ -249,9 +272,10 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
pos += scnprintf(buf+pos, bufsz-pos,
- "ap_sta_id %d - reduced Tx power %d\n",
+ "ap_sta_id %d - reduced Tx power %d force %d\n",
ap_sta_id,
- mvm_sta->bt_reduced_txpower);
+ mvm_sta->bt_reduced_txpower,
+ mvm_sta->bt_reduced_txpower_dbg);
}
}
@@ -269,6 +293,36 @@ static ssize_t iwl_dbgfs_mac_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ struct iwl_mvm_sta *mvmsta;
+ bool reduced_tx_power;
+ int ret;
+
+ if (mvmvif->ap_sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ return -ENOTCONN;
+
+ if (strtobool(buf, &reduced_tx_power) != 0)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
+ mvmsta->bt_reduced_txpower_dbg = false;
+ ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
+ reduced_tx_power);
+ if (!ret)
+ mvmsta->bt_reduced_txpower_dbg = true;
+
+ mutex_unlock(&mvm->mutex);
+
+ return ret ? : count;
+}
+
static void iwl_dbgfs_update_bf(struct ieee80211_vif *vif,
enum iwl_dbgfs_bf_mask param, int value)
{
@@ -403,9 +457,9 @@ static ssize_t iwl_dbgfs_bf_params_write(struct ieee80211_vif *vif, char *buf,
mutex_lock(&mvm->mutex);
iwl_dbgfs_update_bf(vif, param, value);
if (param == MVM_DEBUGFS_BF_ENABLE_BEACON_FILTER && !value)
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
else
- ret = iwl_mvm_enable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -460,6 +514,41 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
+static ssize_t iwl_dbgfs_low_latency_write(struct ieee80211_vif *vif, char *buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mvm *mvm = mvmvif->mvm;
+ u8 value;
+ int ret;
+
+ ret = kstrtou8(buf, 0, &value);
+ if (ret)
+ return ret;
+ if (value > 1)
+ return -EINVAL;
+
+ mutex_lock(&mvm->mutex);
+ iwl_mvm_update_low_latency(mvm, vif, value);
+ mutex_unlock(&mvm->mutex);
+
+ return count;
+}
+
+static ssize_t iwl_dbgfs_low_latency_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_vif *vif = file->private_data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ char buf[3];
+
+ buf[0] = mvmvif->low_latency ? '1' : '0';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, sizeof(buf));
+}
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
@@ -473,6 +562,8 @@ static ssize_t iwl_dbgfs_bf_params_read(struct file *file,
MVM_DEBUGFS_READ_FILE_OPS(mac_params);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(pm_params, 32);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bf_params, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(low_latency, 10);
+MVM_DEBUGFS_WRITE_FILE_OPS(reduced_txp, 10);
void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
@@ -496,15 +587,18 @@ void iwl_mvm_vif_dbgfs_register(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return;
}
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
+ if ((mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT) &&
+ iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM &&
((vif->type == NL80211_IFTYPE_STATION && !vif->p2p) ||
(vif->type == NL80211_IFTYPE_STATION && vif->p2p &&
mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_P2P_PS)))
MVM_DEBUGFS_ADD_FILE_VIF(pm_params, mvmvif->dbgfs_dir, S_IWUSR |
S_IRUSR);
- MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir,
- S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(mac_params, mvmvif->dbgfs_dir, S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(reduced_txp, mvmvif->dbgfs_dir, S_IWUSR);
+ MVM_DEBUGFS_ADD_FILE_VIF(low_latency, mvmvif->dbgfs_dir,
+ S_IRUSR | S_IWUSR);
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
mvmvif == mvm->bf_allowed_vif)
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 369d4c9..6853e5e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -90,7 +90,7 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
- struct ieee80211_sta *sta;
+ struct iwl_mvm_sta *mvmsta;
int sta_id, drain, ret;
if (!mvm->ucode_loaded || mvm->cur_ucode != IWL_UCODE_REGULAR)
@@ -105,13 +105,12 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
mutex_lock(&mvm->mutex);
- sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
- lockdep_is_held(&mvm->mutex));
- if (IS_ERR_OR_NULL(sta))
+ mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
+
+ if (!mvmsta)
ret = -ENOENT;
else
- ret = iwl_mvm_drain_sta(mvm, (void *)sta->drv_priv, drain) ? :
- count;
+ ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? : count;
mutex_unlock(&mvm->mutex);
@@ -251,7 +250,7 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf,
}
mutex_lock(&mvm->mutex);
- ret = iwl_mvm_power_update_device_mode(mvm);
+ ret = iwl_mvm_power_update_device(mvm);
mutex_unlock(&mvm->mutex);
return ret ?: count;
@@ -600,6 +599,187 @@ iwl_dbgfs_scan_ant_rxchain_write(struct iwl_mvm *mvm, char *buf,
return count;
}
+#define ADD_TEXT(...) pos += scnprintf(buf + pos, bufsz - pos, __VA_ARGS__)
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+static ssize_t iwl_dbgfs_bcast_filters_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bcast_filter_cmd cmd;
+ const struct iwl_fw_bcast_filter *filter;
+ char *buf;
+ int bufsz = 1024;
+ int i, j, pos = 0;
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+ ADD_TEXT("None\n");
+ mutex_unlock(&mvm->mutex);
+ goto out;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ for (i = 0; cmd.filters[i].attrs[0].mask; i++) {
+ filter = &cmd.filters[i];
+
+ ADD_TEXT("Filter [%d]:\n", i);
+ ADD_TEXT("\tDiscard=%d\n", filter->discard);
+ ADD_TEXT("\tFrame Type: %s\n",
+ filter->frame_type ? "IPv4" : "Generic");
+
+ for (j = 0; j < ARRAY_SIZE(filter->attrs); j++) {
+ const struct iwl_fw_bcast_filter_attr *attr;
+
+ attr = &filter->attrs[j];
+ if (!attr->mask)
+ break;
+
+ ADD_TEXT("\tAttr [%d]: offset=%d (from %s), mask=0x%x, value=0x%x reserved=0x%x\n",
+ j, attr->offset,
+ attr->offset_type ? "IP End" :
+ "Payload Start",
+ be32_to_cpu(attr->mask),
+ be32_to_cpu(attr->val),
+ le16_to_cpu(attr->reserved1));
+ }
+ }
+out:
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ int pos, next_pos;
+ struct iwl_fw_bcast_filter filter = {};
+ struct iwl_bcast_filter_cmd cmd;
+ u32 filter_id, attr_id, mask, value;
+ int err = 0;
+
+ if (sscanf(buf, "%d %hhi %hhi %n", &filter_id, &filter.discard,
+ &filter.frame_type, &pos) != 3)
+ return -EINVAL;
+
+ if (filter_id >= ARRAY_SIZE(mvm->dbgfs_bcast_filtering.cmd.filters) ||
+ filter.frame_type > BCAST_FILTER_FRAME_TYPE_IPV4)
+ return -EINVAL;
+
+ for (attr_id = 0; attr_id < ARRAY_SIZE(filter.attrs);
+ attr_id++) {
+ struct iwl_fw_bcast_filter_attr *attr =
+ &filter.attrs[attr_id];
+
+ if (pos >= count)
+ break;
+
+ if (sscanf(&buf[pos], "%hhi %hhi %i %i %n",
+ &attr->offset, &attr->offset_type,
+ &mask, &value, &next_pos) != 4)
+ return -EINVAL;
+
+ attr->mask = cpu_to_be32(mask);
+ attr->val = cpu_to_be32(value);
+ if (mask)
+ filter.num_attrs++;
+
+ pos += next_pos;
+ }
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&mvm->dbgfs_bcast_filtering.cmd.filters[filter_id],
+ &filter, sizeof(filter));
+
+ /* send updated bcast filtering configuration */
+ if (mvm->dbgfs_bcast_filtering.override &&
+ iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return err ?: count;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_macs_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ struct iwl_bcast_filter_cmd cmd;
+ char *buf;
+ int bufsz = 1024;
+ int i, pos = 0;
+ ssize_t ret;
+
+ buf = kzalloc(bufsz, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ mutex_lock(&mvm->mutex);
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) {
+ ADD_TEXT("None\n");
+ mutex_unlock(&mvm->mutex);
+ goto out;
+ }
+ mutex_unlock(&mvm->mutex);
+
+ for (i = 0; i < ARRAY_SIZE(cmd.macs); i++) {
+ const struct iwl_fw_bcast_mac *mac = &cmd.macs[i];
+
+ ADD_TEXT("Mac [%d]: discard=%d attached_filters=0x%x\n",
+ i, mac->default_discard, mac->attached_filters);
+ }
+out:
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+ kfree(buf);
+ return ret;
+}
+
+static ssize_t iwl_dbgfs_bcast_filters_macs_write(struct iwl_mvm *mvm,
+ char *buf, size_t count,
+ loff_t *ppos)
+{
+ struct iwl_bcast_filter_cmd cmd;
+ struct iwl_fw_bcast_mac mac = {};
+ u32 mac_id, attached_filters;
+ int err = 0;
+
+ if (!mvm->bcast_filters)
+ return -ENOENT;
+
+ if (sscanf(buf, "%d %hhi %i", &mac_id, &mac.default_discard,
+ &attached_filters) != 3)
+ return -EINVAL;
+
+ if (mac_id >= ARRAY_SIZE(cmd.macs) ||
+ mac.default_discard > 1 ||
+ attached_filters >= BIT(ARRAY_SIZE(cmd.filters)))
+ return -EINVAL;
+
+ mac.attached_filters = cpu_to_le16(attached_filters);
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&mvm->dbgfs_bcast_filtering.cmd.macs[mac_id],
+ &mac, sizeof(mac));
+
+ /* send updated bcast filtering configuration */
+ if (mvm->dbgfs_bcast_filtering.override &&
+ iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ err = iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+ mutex_unlock(&mvm->mutex);
+
+ return err ?: count;
+}
+#endif
+
#ifdef CONFIG_PM_SLEEP
static ssize_t iwl_dbgfs_d3_sram_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
@@ -658,15 +838,74 @@ static ssize_t iwl_dbgfs_d3_sram_read(struct file *file, char __user *user_buf,
}
#endif
+#define PRINT_MVM_REF(ref) do { \
+ if (test_bit(ref, mvm->ref_bitmap)) \
+ pos += scnprintf(buf + pos, bufsz - pos, \
+ "\t(0x%lx) %s\n", \
+ BIT(ref), #ref); \
+} while (0)
+
+static ssize_t iwl_dbgfs_d0i3_refs_read(struct file *file,
+ char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct iwl_mvm *mvm = file->private_data;
+ int pos = 0;
+ char buf[256];
+ const size_t bufsz = sizeof(buf);
+
+ pos += scnprintf(buf + pos, bufsz - pos, "taken mvm refs: 0x%lx\n",
+ mvm->ref_bitmap[0]);
+
+ PRINT_MVM_REF(IWL_MVM_REF_UCODE_DOWN);
+ PRINT_MVM_REF(IWL_MVM_REF_SCAN);
+ PRINT_MVM_REF(IWL_MVM_REF_ROC);
+ PRINT_MVM_REF(IWL_MVM_REF_P2P_CLIENT);
+ PRINT_MVM_REF(IWL_MVM_REF_AP_IBSS);
+ PRINT_MVM_REF(IWL_MVM_REF_USER);
+
+ return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
+}
+
+static ssize_t iwl_dbgfs_d0i3_refs_write(struct iwl_mvm *mvm, char *buf,
+ size_t count, loff_t *ppos)
+{
+ unsigned long value;
+ int ret;
+ bool taken;
+
+ ret = kstrtoul(buf, 10, &value);
+ if (ret < 0)
+ return ret;
+
+ mutex_lock(&mvm->mutex);
+
+ taken = test_bit(IWL_MVM_REF_USER, mvm->ref_bitmap);
+ if (value == 1 && !taken)
+ iwl_mvm_ref(mvm, IWL_MVM_REF_USER);
+ else if (value == 0 && taken)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_USER);
+ else
+ ret = -EINVAL;
+
+ mutex_unlock(&mvm->mutex);
+
+ if (ret < 0)
+ return ret;
+ return count;
+}
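In short, writing 1 takes the IWL_MVM_REF_USER reference (keeping the device out of D0i3) and writing 0 drops it; repeating the same value is rejected with -EINVAL. A userspace sketch under those assumptions (path again supplied by the caller):

#include <fcntl.h>
#include <unistd.h>

/* Hypothetical example: hold the user d0i3 reference around some
 * latency-sensitive work, then release it. */
static void hold_user_ref(const char *d0i3_refs_path)
{
	int fd = open(d0i3_refs_path, O_WRONLY);

	if (fd < 0)
		return;
	(void)write(fd, "1", 1);	/* iwl_mvm_ref(mvm, IWL_MVM_REF_USER) */
	/* ... latency-sensitive work ... */
	(void)write(fd, "0", 1);	/* iwl_mvm_unref(mvm, IWL_MVM_REF_USER) */
	close(fd);
}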
+
#define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
#define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \
_MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz, struct iwl_mvm)
-#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) do { \
- if (!debugfs_create_file(#name, mode, parent, mvm, \
+#define MVM_DEBUGFS_ADD_FILE_ALIAS(alias, name, parent, mode) do { \
+ if (!debugfs_create_file(alias, mode, parent, mvm, \
&iwl_dbgfs_##name##_ops)) \
goto err; \
} while (0)
+#define MVM_DEBUGFS_ADD_FILE(name, parent, mode) \
+ MVM_DEBUGFS_ADD_FILE_ALIAS(#name, name, parent, mode)
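The _ALIAS variant decouples the debugfs file name from the ops symbol. For instance, the call made below inside the bcast_filtering directory expands (modulo the do { } while (0) wrapper) to:

/* MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters, bcast_dir,
 *			      S_IWUSR | S_IRUSR)
 * becomes: */
if (!debugfs_create_file("filters", S_IWUSR | S_IRUSR, bcast_dir, mvm,
			 &iwl_dbgfs_bcast_filters_ops))
	goto err;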
/* Device wide debugfs entries */
MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16);
@@ -680,6 +919,12 @@ MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
+MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
+#endif
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
@@ -687,6 +932,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(d3_sram, 8);
int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
{
+ struct dentry *bcast_dir __maybe_unused;
char buf[100];
mvm->debugfs_dir = dbgfs_dir;
@@ -705,6 +951,27 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
S_IWUSR | S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE(d0i3_refs, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
+
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING) {
+ bcast_dir = debugfs_create_dir("bcast_filtering",
+ mvm->debugfs_dir);
+ if (!bcast_dir)
+ goto err;
+
+ if (!debugfs_create_bool("override", S_IRUSR | S_IWUSR,
+ bcast_dir,
+ &mvm->dbgfs_bcast_filtering.override))
+ goto err;
+
+ MVM_DEBUGFS_ADD_FILE_ALIAS("filters", bcast_filters,
+ bcast_dir, S_IWUSR | S_IRUSR);
+ MVM_DEBUGFS_ADD_FILE_ALIAS("macs", bcast_filters_macs,
+ bcast_dir, S_IWUSR | S_IRUSR);
+ }
+#endif
+
#ifdef CONFIG_PM_SLEEP
MVM_DEBUGFS_ADD_FILE(d3_sram, mvm->debugfs_dir, S_IRUSR | S_IWUSR);
MVM_DEBUGFS_ADD_FILE(d3_test, mvm->debugfs_dir, S_IRUSR);
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
index 1b4e54d..20b723d 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-bt-coex.h
@@ -70,37 +70,24 @@
/**
* enum iwl_bt_coex_flags - flags for BT_COEX command
- * @BT_CH_PRIMARY_EN:
- * @BT_CH_SECONDARY_EN:
- * @BT_NOTIF_COEX_OFF:
* @BT_COEX_MODE_POS:
* @BT_COEX_MODE_MSK:
* @BT_COEX_DISABLE:
* @BT_COEX_2W:
* @BT_COEX_3W:
* @BT_COEX_NW:
- * @BT_USE_DEFAULTS:
- * @BT_SYNC_2_BT_DISABLE:
- * @BT_COEX_CORUNNING_TBL_EN:
+ * @BT_COEX_SYNC2SCO:
*
* The COEX_MODE must be set for each command. Even if it is not changed.
*/
enum iwl_bt_coex_flags {
- BT_CH_PRIMARY_EN = BIT(0),
- BT_CH_SECONDARY_EN = BIT(1),
- BT_NOTIF_COEX_OFF = BIT(2),
BT_COEX_MODE_POS = 3,
BT_COEX_MODE_MSK = BITS(3) << BT_COEX_MODE_POS,
BT_COEX_DISABLE = 0x0 << BT_COEX_MODE_POS,
BT_COEX_2W = 0x1 << BT_COEX_MODE_POS,
BT_COEX_3W = 0x2 << BT_COEX_MODE_POS,
BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
- BT_USE_DEFAULTS = BIT(6),
- BT_SYNC_2_BT_DISABLE = BIT(7),
- BT_COEX_CORUNNING_TBL_EN = BIT(8),
- BT_COEX_MPLUT_TBL_EN = BIT(9),
- /* Bit 10 is reserved */
- BT_COEX_WF_PRIO_BOOST_CHECK_EN = BIT(11),
+ BT_COEX_SYNC2SCO = BIT(7),
};
/*
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
index 8415ff3..5219976 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-d3.h
@@ -231,8 +231,12 @@ enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT = BIT(8),
IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS = BIT(9),
IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE = BIT(10),
- /* BIT(11) reserved */
+ IWL_WOWLAN_WAKEUP_REMOTE_TCP_EXTERNAL = BIT(11),
IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET = BIT(12),
+ IWL_WOWLAN_WAKEUP_IOAC_MAGIC_PACKET = BIT(13),
+ IWL_WOWLAN_WAKEUP_HOST_TIMER = BIT(14),
+ IWL_WOWLAN_WAKEUP_RX_FRAME = BIT(15),
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
struct iwl_wowlan_config_cmd {
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 884c087..cbbcd8e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -301,54 +301,65 @@ struct iwl_beacon_filter_cmd {
/* Beacon filtering and beacon abort */
#define IWL_BF_ENERGY_DELTA_DEFAULT 5
+#define IWL_BF_ENERGY_DELTA_D0I3 20
#define IWL_BF_ENERGY_DELTA_MAX 255
#define IWL_BF_ENERGY_DELTA_MIN 0
#define IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT 1
+#define IWL_BF_ROAMING_ENERGY_DELTA_D0I3 20
#define IWL_BF_ROAMING_ENERGY_DELTA_MAX 255
#define IWL_BF_ROAMING_ENERGY_DELTA_MIN 0
#define IWL_BF_ROAMING_STATE_DEFAULT 72
+#define IWL_BF_ROAMING_STATE_D0I3 72
#define IWL_BF_ROAMING_STATE_MAX 255
#define IWL_BF_ROAMING_STATE_MIN 0
#define IWL_BF_TEMP_THRESHOLD_DEFAULT 112
+#define IWL_BF_TEMP_THRESHOLD_D0I3 112
#define IWL_BF_TEMP_THRESHOLD_MAX 255
#define IWL_BF_TEMP_THRESHOLD_MIN 0
#define IWL_BF_TEMP_FAST_FILTER_DEFAULT 1
+#define IWL_BF_TEMP_FAST_FILTER_D0I3 1
#define IWL_BF_TEMP_FAST_FILTER_MAX 255
#define IWL_BF_TEMP_FAST_FILTER_MIN 0
#define IWL_BF_TEMP_SLOW_FILTER_DEFAULT 5
+#define IWL_BF_TEMP_SLOW_FILTER_D0I3 5
#define IWL_BF_TEMP_SLOW_FILTER_MAX 255
#define IWL_BF_TEMP_SLOW_FILTER_MIN 0
#define IWL_BF_ENABLE_BEACON_FILTER_DEFAULT 1
#define IWL_BF_DEBUG_FLAG_DEFAULT 0
+#define IWL_BF_DEBUG_FLAG_D0I3 0
#define IWL_BF_ESCAPE_TIMER_DEFAULT 50
+#define IWL_BF_ESCAPE_TIMER_D0I3 1024
#define IWL_BF_ESCAPE_TIMER_MAX 1024
#define IWL_BF_ESCAPE_TIMER_MIN 0
#define IWL_BA_ESCAPE_TIMER_DEFAULT 6
+#define IWL_BA_ESCAPE_TIMER_D0I3 6
#define IWL_BA_ESCAPE_TIMER_D3 9
#define IWL_BA_ESCAPE_TIMER_MAX 1024
#define IWL_BA_ESCAPE_TIMER_MIN 0
#define IWL_BA_ENABLE_BEACON_ABORT_DEFAULT 1
-#define IWL_BF_CMD_CONFIG_DEFAULTS \
- .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA_DEFAULT), \
- .bf_roaming_energy_delta = \
- cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA_DEFAULT), \
- .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE_DEFAULT), \
- .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD_DEFAULT), \
- .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER_DEFAULT), \
- .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER_DEFAULT), \
- .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG_DEFAULT), \
- .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER_DEFAULT), \
- .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER_DEFAULT)
+#define IWL_BF_CMD_CONFIG(mode) \
+ .bf_energy_delta = cpu_to_le32(IWL_BF_ENERGY_DELTA ## mode), \
+ .bf_roaming_energy_delta = \
+ cpu_to_le32(IWL_BF_ROAMING_ENERGY_DELTA ## mode), \
+ .bf_roaming_state = cpu_to_le32(IWL_BF_ROAMING_STATE ## mode), \
+ .bf_temp_threshold = cpu_to_le32(IWL_BF_TEMP_THRESHOLD ## mode), \
+ .bf_temp_fast_filter = cpu_to_le32(IWL_BF_TEMP_FAST_FILTER ## mode), \
+ .bf_temp_slow_filter = cpu_to_le32(IWL_BF_TEMP_SLOW_FILTER ## mode), \
+ .bf_debug_flag = cpu_to_le32(IWL_BF_DEBUG_FLAG ## mode), \
+ .bf_escape_timer = cpu_to_le32(IWL_BF_ESCAPE_TIMER ## mode), \
+ .ba_escape_timer = cpu_to_le32(IWL_BA_ESCAPE_TIMER ## mode)
+#define IWL_BF_CMD_CONFIG_DEFAULTS IWL_BF_CMD_CONFIG(_DEFAULT)
+#define IWL_BF_CMD_CONFIG_D0I3 IWL_BF_CMD_CONFIG(_D0I3)
#endif
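The token-pasting helper selects a whole tuning set at compile time, so IWL_BF_CMD_CONFIG(_D0I3) resolves each field to its _D0I3 constant (for example .bf_escape_timer becomes cpu_to_le32(IWL_BF_ESCAPE_TIMER_D0I3), i.e. 1024). A sketch of how a command could be initialized with it (illustrative only; the real users of the macro live elsewhere in the driver):

static const struct iwl_beacon_filter_cmd bf_cmd_d0i3_sketch = {
	IWL_BF_CMD_CONFIG_D0I3,
};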
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
index 1b60fdf..d636478 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-sta.h
@@ -199,11 +199,14 @@ enum iwl_sta_modify_flag {
* @STA_SLEEP_STATE_AWAKE:
* @STA_SLEEP_STATE_PS_POLL:
* @STA_SLEEP_STATE_UAPSD:
+ * @STA_SLEEP_STATE_MOREDATA: set more-data bit on
+ * (last) released frame
*/
enum iwl_sta_sleep_flag {
- STA_SLEEP_STATE_AWAKE = 0,
- STA_SLEEP_STATE_PS_POLL = BIT(0),
- STA_SLEEP_STATE_UAPSD = BIT(1),
+ STA_SLEEP_STATE_AWAKE = 0,
+ STA_SLEEP_STATE_PS_POLL = BIT(0),
+ STA_SLEEP_STATE_UAPSD = BIT(1),
+ STA_SLEEP_STATE_MOREDATA = BIT(2),
};
/* STA ID and color bits definitions */
@@ -318,13 +321,15 @@ struct iwl_mvm_add_sta_cmd_v5 {
} __packed; /* ADD_STA_CMD_API_S_VER_5 */
/**
- * struct iwl_mvm_add_sta_cmd_v6 - Add / modify a station
- * VER_6 of this command is quite similar to VER_5 except
+ * struct iwl_mvm_add_sta_cmd_v7 - Add / modify a station
+ * VER_7 of this command is quite similar to VER_5 except
* exclusion of all fields related to the security key installation.
+ * It only differs from VER_6 by the "awake_acs" field that is
+ * reserved and ignored in VER_6.
*/
-struct iwl_mvm_add_sta_cmd_v6 {
+struct iwl_mvm_add_sta_cmd_v7 {
u8 add_modify;
- u8 reserved1;
+ u8 awake_acs;
__le16 tid_disable_tx;
__le32 mac_id_n_color;
u8 addr[ETH_ALEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
@@ -342,7 +347,7 @@ struct iwl_mvm_add_sta_cmd_v6 {
__le16 assoc_id;
__le16 beamform_flags;
__le32 tfd_queue_msk;
-} __packed; /* ADD_STA_CMD_API_S_VER_6 */
+} __packed; /* ADD_STA_CMD_API_S_VER_7 */
/**
* struct iwl_mvm_add_sta_key_cmd - add/modify sta key
@@ -432,5 +437,15 @@ struct iwl_mvm_wep_key_cmd {
struct iwl_mvm_wep_key wep_key[0];
} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
+/**
+ * struct iwl_mvm_eosp_notification - EOSP notification from firmware
+ * @remain_frame_count: # of frames remaining, non-zero if SP was cut
+ * short by GO absence
+ * @sta_id: station ID
+ */
+struct iwl_mvm_eosp_notification {
+ __le32 remain_frame_count;
+ __le32 sta_id;
+} __packed; /* UAPSD_EOSP_NTFY_API_S_VER_1 */
#endif /* __fw_api_sta_h__ */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 989d7db..a7c88f1 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -163,6 +163,7 @@ enum {
TX_ANT_CONFIGURATION_CMD = 0x98,
BT_CONFIG = 0x9b,
STATISTICS_NOTIFICATION = 0x9d,
+ EOSP_NOTIFICATION = 0x9e,
REDUCE_TX_POWER_CMD = 0x9f,
/* RF-KILL commands and notifications */
@@ -190,6 +191,7 @@ enum {
REPLY_DEBUG_CMD = 0xf0,
DEBUG_LOG_MSG = 0xf7,
+ BCAST_FILTER_CMD = 0xcf,
MCAST_FILTER_CMD = 0xd0,
/* D3 commands/notifications */
@@ -197,6 +199,7 @@ enum {
PROT_OFFLOAD_CONFIG_CMD = 0xd4,
OFFLOADS_QUERY_CMD = 0xd5,
REMOTE_WAKE_CONFIG_CMD = 0xd6,
+ D0I3_END_CMD = 0xed,
/* for WoWLAN in particular */
WOWLAN_PATTERNS = 0xe0,
@@ -303,6 +306,7 @@ struct iwl_phy_cfg_cmd {
#define PHY_CFG_RX_CHAIN_B BIT(13)
#define PHY_CFG_RX_CHAIN_C BIT(14)
+#define NVM_MAX_NUM_SECTIONS 11
/* Target of the NVM_ACCESS_CMD */
enum {
@@ -313,14 +317,9 @@ enum {
/* Section types for NVM_ACCESS_CMD */
enum {
- NVM_SECTION_TYPE_HW = 0,
- NVM_SECTION_TYPE_SW,
- NVM_SECTION_TYPE_PAPD,
- NVM_SECTION_TYPE_BT,
- NVM_SECTION_TYPE_CALIBRATION,
- NVM_SECTION_TYPE_PRODUCTION,
- NVM_SECTION_TYPE_POST_FCS_CALIB,
- NVM_NUM_OF_SECTIONS,
+ NVM_SECTION_TYPE_SW = 1,
+ NVM_SECTION_TYPE_CALIBRATION = 4,
+ NVM_SECTION_TYPE_PRODUCTION = 5,
};
/**
@@ -412,6 +411,35 @@ struct mvm_alive_resp {
__le32 scd_base_ptr; /* SRAM address for SCD */
} __packed; /* ALIVE_RES_API_S_VER_1 */
+struct mvm_alive_resp_ver2 {
+ __le16 status;
+ __le16 flags;
+ u8 ucode_minor;
+ u8 ucode_major;
+ __le16 id;
+ u8 api_minor;
+ u8 api_major;
+ u8 ver_subtype;
+ u8 ver_type;
+ u8 mac;
+ u8 opt;
+ __le16 reserved2;
+ __le32 timestamp;
+ __le32 error_event_table_ptr; /* SRAM address for error log */
+ __le32 log_event_table_ptr; /* SRAM address for LMAC event log */
+ __le32 cpu_register_ptr;
+ __le32 dbgm_config_ptr;
+ __le32 alive_counter_ptr;
+ __le32 scd_base_ptr; /* SRAM address for SCD */
+ __le32 st_fwrd_addr; /* pointer to Store and forward */
+ __le32 st_fwrd_size;
+ u8 umac_minor; /* UMAC version: minor */
+ u8 umac_major; /* UMAC version: major */
+ __le16 umac_id; /* UMAC version: id */
+ __le32 error_info_addr; /* SRAM address for UMAC error log */
+ __le32 dbg_print_buff_addr;
+} __packed; /* ALIVE_RES_API_S_VER_2 */
+
/* Error response/notification */
enum {
FW_ERR_UNKNOWN_CMD = 0x0,
@@ -1159,6 +1187,90 @@ struct iwl_mcast_filter_cmd {
u8 addr_list[0];
} __packed; /* MCAST_FILTERING_CMD_API_S_VER_1 */
+#define MAX_BCAST_FILTERS 8
+#define MAX_BCAST_FILTER_ATTRS 2
+
+/**
+ * enum iwl_mvm_bcast_filter_attr_offset - reference point for the attribute offset
+ * @BCAST_FILTER_OFFSET_PAYLOAD_START: offset is from payload start.
+ * @BCAST_FILTER_OFFSET_IP_END: offset is from ip header end (i.e.
+ * start of ip payload).
+ */
+enum iwl_mvm_bcast_filter_attr_offset {
+ BCAST_FILTER_OFFSET_PAYLOAD_START = 0,
+ BCAST_FILTER_OFFSET_IP_END = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter_attr - broadcast filter attribute
+ * @offset_type: &enum iwl_mvm_bcast_filter_attr_offset.
+ * @offset: starting offset of this pattern.
+ * @val: value to match - big endian (MSB is the first
+ * byte to match from offset pos).
+ * @mask: mask to match (big endian).
+ */
+struct iwl_fw_bcast_filter_attr {
+ u8 offset_type;
+ u8 offset;
+ __le16 reserved1;
+ __be32 val;
+ __be32 mask;
+} __packed; /* BCAST_FILTER_ATT_S_VER_1 */
+
+/**
+ * enum iwl_mvm_bcast_filter_frame_type - filter frame type
+ * @BCAST_FILTER_FRAME_TYPE_ALL: consider all frames.
+ * @BCAST_FILTER_FRAME_TYPE_IPV4: consider only ipv4 frames
+ */
+enum iwl_mvm_bcast_filter_frame_type {
+ BCAST_FILTER_FRAME_TYPE_ALL = 0,
+ BCAST_FILTER_FRAME_TYPE_IPV4 = 1,
+};
+
+/**
+ * struct iwl_fw_bcast_filter - broadcast filter
+ * @discard: discard frame (1) or let it pass (0).
+ * @frame_type: &enum iwl_mvm_bcast_filter_frame_type.
+ * @num_attrs: number of valid attributes in this filter.
+ * @attrs: attributes of this filter. A filter is considered matched
+ * only when all of its attributes are matched (i.e. an AND relationship).

+ */
+struct iwl_fw_bcast_filter {
+ u8 discard;
+ u8 frame_type;
+ u8 num_attrs;
+ u8 reserved1;
+ struct iwl_fw_bcast_filter_attr attrs[MAX_BCAST_FILTER_ATTRS];
+} __packed; /* BCAST_FILTER_S_VER_1 */
+
+/**
+ * struct iwl_fw_bcast_mac - per-mac broadcast filtering configuration.
+ * @default_discard: default action for this mac (discard (1) / pass (0)).
+ * @attached_filters: bitmap of relevant filters for this mac.
+ */
+struct iwl_fw_bcast_mac {
+ u8 default_discard;
+ u8 reserved1;
+ __le16 attached_filters;
+} __packed; /* BCAST_MAC_CONTEXT_S_VER_1 */
+
+/**
+ * struct iwl_bcast_filter_cmd - broadcast filtering configuration
+ * @disable: enable (0) / disable (1)
+ * @max_bcast_filters: max number of filters (MAX_BCAST_FILTERS)
+ * @max_macs: max number of macs (NUM_MAC_INDEX_DRIVER)
+ * @filters: broadcast filters
+ * @macs: broadcast filtering configuration per-mac
+ */
+struct iwl_bcast_filter_cmd {
+ u8 disable;
+ u8 max_bcast_filters;
+ u8 max_macs;
+ u8 reserved1;
+ struct iwl_fw_bcast_filter filters[MAX_BCAST_FILTERS];
+ struct iwl_fw_bcast_mac macs[NUM_MAC_INDEX_DRIVER];
+} __packed; /* BCAST_FILTERING_HCMD_API_S_VER_1 */
+
struct mvm_statistics_dbg {
__le32 burst_check;
__le32 burst_count;
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index c03d395..bae75b3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -110,18 +110,46 @@ static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
container_of(notif_wait, struct iwl_mvm, notif_wait);
struct iwl_mvm_alive_data *alive_data = data;
struct mvm_alive_resp *palive;
-
- palive = (void *)pkt->data;
-
- mvm->error_event_table = le32_to_cpu(palive->error_event_table_ptr);
- mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
- alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
-
- alive_data->valid = le16_to_cpu(palive->status) == IWL_ALIVE_STATUS_OK;
- IWL_DEBUG_FW(mvm,
- "Alive ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
- le16_to_cpu(palive->status), palive->ver_type,
- palive->ver_subtype, palive->flags);
+ struct mvm_alive_resp_ver2 *palive2;
+
+ if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
+ palive = (void *)pkt->data;
+
+ mvm->support_umac_log = false;
+ mvm->error_event_table =
+ le32_to_cpu(palive->error_event_table_ptr);
+ mvm->log_event_table = le32_to_cpu(palive->log_event_table_ptr);
+ alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
+
+ alive_data->valid = le16_to_cpu(palive->status) ==
+ IWL_ALIVE_STATUS_OK;
+ IWL_DEBUG_FW(mvm,
+ "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
+ le16_to_cpu(palive->status), palive->ver_type,
+ palive->ver_subtype, palive->flags);
+ } else {
+ palive2 = (void *)pkt->data;
+
+ mvm->support_umac_log = true;
+ mvm->error_event_table =
+ le32_to_cpu(palive2->error_event_table_ptr);
+ mvm->log_event_table =
+ le32_to_cpu(palive2->log_event_table_ptr);
+ alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
+ mvm->umac_error_event_table =
+ le32_to_cpu(palive2->error_info_addr);
+
+ alive_data->valid = le16_to_cpu(palive2->status) ==
+ IWL_ALIVE_STATUS_OK;
+ IWL_DEBUG_FW(mvm,
+ "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
+ le16_to_cpu(palive2->status), palive2->ver_type,
+ palive2->ver_subtype, palive2->flags);
+
+ IWL_DEBUG_FW(mvm,
+ "UMAC version: Major - 0x%x, Minor - 0x%x\n",
+ palive2->umac_major, palive2->umac_minor);
+ }
return true;
}
@@ -439,10 +467,23 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
goto error;
}
- ret = iwl_mvm_power_update_device_mode(mvm);
+ /* Initialize tx backoffs to the minimal possible */
+ iwl_mvm_tt_tx_backoff(mvm, 0);
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
+ ret = iwl_power_legacy_set_cam_mode(mvm);
+ if (ret)
+ goto error;
+ }
+
+ ret = iwl_mvm_power_update_device(mvm);
if (ret)
goto error;
+ /* allow FW/transport low power modes if not during restart */
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
return 0;
error:
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index ba723d5..5c21aab 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -90,6 +90,7 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
{
struct iwl_mvm_mac_iface_iterator_data *data = _data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u16 min_bi;
/* Skip the interface for which we are trying to assign a tsf_id */
if (vif == data->vif)
@@ -114,42 +115,57 @@ static void iwl_mvm_mac_tsf_id_iter(void *_data, u8 *mac,
switch (data->vif->type) {
case NL80211_IFTYPE_STATION:
/*
- * The new interface is client, so if the existing one
- * we're iterating is an AP, and both interfaces have the
- * same beacon interval, the same TSF should be used to
- * avoid drift between the new client and existing AP,
- * the existing AP will get drift updates from the new
- * client context in this case
+ * The new interface is a client, so if the one we're iterating
+ * is an AP, and the beacon interval of the AP is a multiple or
+ * divisor of the beacon interval of the client, the same TSF
+ * should be used to avoid drift between the new client and
+ * existing AP. The existing AP will get drift updates from the
+ * new client context in this case.
*/
- if (vif->type == NL80211_IFTYPE_AP) {
- if (data->preferred_tsf == NUM_TSF_IDS &&
- test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
- (vif->bss_conf.beacon_int ==
- data->vif->bss_conf.beacon_int)) {
- data->preferred_tsf = mvmvif->tsf_id;
- return;
- }
+ if (vif->type != NL80211_IFTYPE_AP ||
+ data->preferred_tsf != NUM_TSF_IDS ||
+ !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ break;
+
+ min_bi = min(data->vif->bss_conf.beacon_int,
+ vif->bss_conf.beacon_int);
+
+ if (!min_bi)
+ break;
+
+ if ((data->vif->bss_conf.beacon_int -
+ vif->bss_conf.beacon_int) % min_bi == 0) {
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
}
break;
+
case NL80211_IFTYPE_AP:
/*
- * The new interface is AP/GO, so in case both interfaces
- * have the same beacon interval, it should get drift
- * updates from an existing client or use the same
- * TSF as an existing GO. There's no drift between
- * TSFs internally but if they used different TSFs
- * then a new client MAC could update one of them
- * and cause drift that way.
+ * The new interface is AP/GO, so if its beacon interval is a
+ * multiple or a divisor of the beacon interval of an existing
+ * interface, it should get drift updates from an existing
+ * client or use the same TSF as an existing GO. There's no
+ * drift between TSFs internally but if they used different
+ * TSFs then a new client MAC could update one of them and
+ * cause drift that way.
*/
- if (vif->type == NL80211_IFTYPE_STATION ||
- vif->type == NL80211_IFTYPE_AP) {
- if (data->preferred_tsf == NUM_TSF_IDS &&
- test_bit(mvmvif->tsf_id, data->available_tsf_ids) &&
- (vif->bss_conf.beacon_int ==
- data->vif->bss_conf.beacon_int)) {
- data->preferred_tsf = mvmvif->tsf_id;
- return;
- }
+ if ((vif->type != NL80211_IFTYPE_AP &&
+ vif->type != NL80211_IFTYPE_STATION) ||
+ data->preferred_tsf != NUM_TSF_IDS ||
+ !test_bit(mvmvif->tsf_id, data->available_tsf_ids))
+ break;
+
+ min_bi = min(data->vif->bss_conf.beacon_int,
+ vif->bss_conf.beacon_int);
+
+ if (!min_bi)
+ break;
+
+ if ((data->vif->bss_conf.beacon_int -
+ vif->bss_conf.beacon_int) % min_bi == 0) {
+ data->preferred_tsf = mvmvif->tsf_id;
+ return;
}
break;
default:
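The divisibility test used in both branches above accepts interval pairs where one is an integer multiple of the other; because the u16 beacon intervals are promoted to int before the subtraction, the check is symmetric in the two values. A standalone sketch with worked values:

#include <stdbool.h>

/* Mirrors the test above (sketch, not driver code):
 *   a = 300, b = 100 -> min_bi = 100, (300 - 100) % 100 == 0 -> share a TSF
 *   a = 100, b = 300 -> min_bi = 100, (100 - 300) % 100 == 0 -> share a TSF
 *   a = 100, b = 150 -> min_bi = 100, (100 - 150) % 100 != 0 -> keep separate
 */
static bool beacon_int_compatible(int a, int b)
{
	int min_bi = a < b ? a : b;

	return min_bi && (a - b) % min_bi == 0;
}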
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index c35b866..a49d1ef 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -66,6 +66,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
+#include <linux/if_arp.h>
#include <net/mac80211.h>
#include <net/tcp.h>
@@ -128,6 +129,117 @@ static const struct wiphy_wowlan_tcp_support iwl_mvm_wowlan_tcp_support = {
};
#endif
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+/*
+ * Use the reserved field to indicate magic values.
+ * These values are only used internally by the driver
+ * and won't make it to the fw (reserved will be 0).
+ * BC_FILTER_MAGIC_IP - configure the val of this attribute to
+ * be the vif's IP address. In case there is not exactly one
+ * IP address (0, or more than 1), this attribute will
+ * be skipped.
+ * BC_FILTER_MAGIC_MAC - set the val of this attribute to
+ * the LSB bytes of the vif's MAC address.
+ */
+enum {
+ BC_FILTER_MAGIC_NONE = 0,
+ BC_FILTER_MAGIC_IP,
+ BC_FILTER_MAGIC_MAC,
+};
+
+static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
+ {
+ /* arp */
+ .discard = 0,
+ .frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
+ .attrs = {
+ {
+ /* frame type - arp, hw type - ethernet */
+ .offset_type =
+ BCAST_FILTER_OFFSET_PAYLOAD_START,
+ .offset = sizeof(rfc1042_header),
+ .val = cpu_to_be32(0x08060001),
+ .mask = cpu_to_be32(0xffffffff),
+ },
+ {
+ /* arp dest ip */
+ .offset_type =
+ BCAST_FILTER_OFFSET_PAYLOAD_START,
+ .offset = sizeof(rfc1042_header) + 2 +
+ sizeof(struct arphdr) +
+ ETH_ALEN + sizeof(__be32) +
+ ETH_ALEN,
+ .mask = cpu_to_be32(0xffffffff),
+ /* mark it as special field */
+ .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
+ },
+ },
+ },
+ {
+ /* dhcp offer bcast */
+ .discard = 0,
+ .frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
+ .attrs = {
+ {
+ /* udp dest port - 68 (bootp client) */
+ .offset_type = BCAST_FILTER_OFFSET_IP_END,
+ .offset = offsetof(struct udphdr, dest),
+ .val = cpu_to_be32(0x00440000),
+ .mask = cpu_to_be32(0xffff0000),
+ },
+ {
+ /* dhcp - lsb bytes of client hw address */
+ .offset_type = BCAST_FILTER_OFFSET_IP_END,
+ .offset = 38,
+ .mask = cpu_to_be32(0xffffffff),
+ /* mark it as special field */
+ .reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
+ },
+ },
+ },
+ /* last filter must be empty */
+ {},
+};
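For reference, decoding the constants in the ARP entry above (assuming the usual 8-byte struct arphdr and the 6-byte RFC 1042 SNAP header):

/*
 * 0x08060001 at offset 6 from the payload start matches the two bytes of
 * EtherType 0x0806 (ARP) followed by hardware type 0x0001 (Ethernet),
 * i.e. the bytes right after the 6-byte RFC 1042 header.
 *
 * The second attribute's offset works out to
 *   6 + 2 + sizeof(struct arphdr) + ETH_ALEN + 4 + ETH_ALEN
 *   = 6 + 2 + 8 + 6 + 4 + 6 = 32,
 * which is the ARP target IP; BC_FILTER_MAGIC_IP patches in the vif's
 * address at runtime. Similarly, offset 38 from the IP-header end in the
 * DHCP entry is the UDP header (8) plus the BOOTP chaddr field offset
 * (28) + 2, i.e. the last four bytes of the client hardware address,
 * filled in via BC_FILTER_MAGIC_MAC.
 */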
+#endif
+
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+ if (!mvm->trans->cfg->d0i3)
+ return;
+
+ IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
+ WARN_ON(test_and_set_bit(ref_type, mvm->ref_bitmap));
+ iwl_trans_ref(mvm->trans);
+}
+
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
+{
+ if (!mvm->trans->cfg->d0i3)
+ return;
+
+ IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
+ WARN_ON(!test_and_clear_bit(ref_type, mvm->ref_bitmap));
+ iwl_trans_unref(mvm->trans);
+}
+
+static void
+iwl_mvm_unref_all_except(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref)
+{
+ int i;
+
+ if (!mvm->trans->cfg->d0i3)
+ return;
+
+ for_each_set_bit(i, mvm->ref_bitmap, IWL_MVM_REF_COUNT) {
+ if (ref == i)
+ continue;
+
+ IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d\n", i);
+ clear_bit(i, mvm->ref_bitmap);
+ iwl_trans_unref(mvm->trans);
+ }
+}
+
static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
{
int i;
@@ -203,6 +315,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
REGULATORY_DISABLE_BEACON_HINTS;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_GO_UAPSD)
+ hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
+
hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
hw->wiphy->n_iface_combinations =
ARRAY_SIZE(iwl_mvm_iface_combinations);
@@ -289,6 +404,11 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
#endif
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ /* assign default bcast filtering configuration */
+ mvm->bcast_filters = iwl_mvm_default_bcast_filters;
+#endif
+
ret = iwl_mvm_leds_init(mvm);
if (ret)
return ret;
@@ -305,6 +425,9 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct sk_buff *skb)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct ieee80211_sta *sta = control->sta;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
+ struct ieee80211_hdr *hdr = (void *)skb->data;
if (iwl_mvm_is_radio_killed(mvm)) {
IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
@@ -315,8 +438,16 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
!test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status))
goto drop;
- if (control->sta) {
- if (iwl_mvm_tx_skb(mvm, skb, control->sta))
+ /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
+ if (unlikely(info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER &&
+ ieee80211_is_mgmt(hdr->frame_control) &&
+ !ieee80211_is_deauth(hdr->frame_control) &&
+ !ieee80211_is_disassoc(hdr->frame_control) &&
+ !ieee80211_is_action(hdr->frame_control)))
+ sta = NULL;
+
+ if (sta) {
+ if (iwl_mvm_tx_skb(mvm, skb, sta))
goto drop;
return;
}
@@ -434,6 +565,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
iwl_mvm_cleanup_iterator, mvm);
mvm->p2p_device_vif = NULL;
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
iwl_mvm_reset_phy_ctxts(mvm);
memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
@@ -441,6 +573,10 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
ieee80211_wake_queues(mvm->hw);
+ /* cleanup all stale references (scan, roc), but keep the
+ * ucode_down ref until reconfig is complete */
+ iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);
+
mvm->vif_count = 0;
mvm->rx_ba_sessions = 0;
}
@@ -475,6 +611,9 @@ static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
ret);
+ /* allow transport/FW low power modes */
+ iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
mutex_unlock(&mvm->mutex);
}
@@ -482,9 +621,14 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ flush_work(&mvm->d0i3_exit_work);
flush_work(&mvm->async_handlers_wk);
mutex_lock(&mvm->mutex);
+
+ /* disallow low power states when the FW is down */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+
/* async_handlers_wk is now blocked */
/*
@@ -510,14 +654,6 @@ static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
cancel_work_sync(&mvm->async_handlers_wk);
}
-static void iwl_mvm_power_update_iterator(void *data, u8 *mac,
- struct ieee80211_vif *vif)
-{
- struct iwl_mvm *mvm = data;
-
- iwl_mvm_power_update_mode(mvm, vif);
-}
-
static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
{
u16 i;
@@ -585,7 +721,8 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
vif->type == NL80211_IFTYPE_ADHOC) {
u32 qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta,
- qmask);
+ qmask,
+ ieee80211_vif_type_p2p(vif));
if (ret) {
IWL_ERR(mvm, "Failed to allocate bcast sta\n");
goto out_release;
@@ -599,10 +736,12 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (ret)
goto out_release;
- iwl_mvm_power_disable(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
+ if (ret)
+ goto out_release;
/* beacon filtering */
- ret = iwl_mvm_disable_beacon_filter(mvm, vif);
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
if (ret)
goto out_remove_mac;
@@ -661,11 +800,6 @@ static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
- /* TODO: remove this when legacy PM will be discarded */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_update_iterator, mvm);
-
iwl_mvm_mac_ctxt_release(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
@@ -754,11 +888,7 @@ static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
mvm->vif_count--;
- /* TODO: remove this when legacy PM will be discarded */
- ieee80211_iterate_active_interfaces(
- mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_update_iterator, mvm);
-
+ iwl_mvm_power_update_mac(mvm, vif);
iwl_mvm_mac_ctxt_remove(mvm, vif);
out_release:
@@ -876,6 +1006,156 @@ out:
*total_flags = 0;
}
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+struct iwl_bcast_iter_data {
+ struct iwl_mvm *mvm;
+ struct iwl_bcast_filter_cmd *cmd;
+ u8 current_filter;
+};
+
+static void
+iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
+ const struct iwl_fw_bcast_filter *in_filter,
+ struct iwl_fw_bcast_filter *out_filter)
+{
+ struct iwl_fw_bcast_filter_attr *attr;
+ int i;
+
+ memcpy(out_filter, in_filter, sizeof(*out_filter));
+
+ for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
+ attr = &out_filter->attrs[i];
+
+ if (!attr->mask)
+ break;
+
+ switch (attr->reserved1) {
+ case cpu_to_le16(BC_FILTER_MAGIC_IP):
+ if (vif->bss_conf.arp_addr_cnt != 1) {
+ attr->mask = 0;
+ continue;
+ }
+
+ attr->val = vif->bss_conf.arp_addr_list[0];
+ break;
+ case cpu_to_le16(BC_FILTER_MAGIC_MAC):
+ attr->val = *(__be32 *)&vif->addr[2];
+ break;
+ default:
+ break;
+ }
+ attr->reserved1 = 0;
+ out_filter->num_attrs++;
+ }
+}
+
+static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bcast_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_bcast_filter_cmd *cmd = data->cmd;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_fw_bcast_mac *bcast_mac;
+ int i;
+
+ if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs)))
+ return;
+
+ bcast_mac = &cmd->macs[mvmvif->id];
+
+ /* enable filtering only for associated stations */
+ if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc)
+ return;
+
+ bcast_mac->default_discard = 1;
+
+ /* copy all configured filters */
+ for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) {
+ /*
+ * Make sure we don't exceed our filters limit.
+ * If there is still a valid filter to be configured,
+ * be on the safe side and just allow bcast for this mac.
+ */
+ if (WARN_ON_ONCE(data->current_filter >=
+ ARRAY_SIZE(cmd->filters))) {
+ bcast_mac->default_discard = 0;
+ bcast_mac->attached_filters = 0;
+ break;
+ }
+
+ iwl_mvm_set_bcast_filter(vif,
+ &mvm->bcast_filters[i],
+ &cmd->filters[data->current_filter]);
+
+ /* skip current filter if it contains no attributes */
+ if (!cmd->filters[data->current_filter].num_attrs)
+ continue;
+
+ /* attach the filter to current mac */
+ bcast_mac->attached_filters |=
+ cpu_to_le16(BIT(data->current_filter));
+
+ data->current_filter++;
+ }
+}
+
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+ struct iwl_bcast_filter_cmd *cmd)
+{
+ struct iwl_bcast_iter_data iter_data = {
+ .mvm = mvm,
+ .cmd = cmd,
+ };
+
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters);
+ cmd->max_macs = ARRAY_SIZE(cmd->macs);
+
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ /* use debugfs filters/macs if override is configured */
+ if (mvm->dbgfs_bcast_filtering.override) {
+ memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters,
+ sizeof(cmd->filters));
+ memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs,
+ sizeof(cmd->macs));
+ return true;
+ }
+#endif
+
+ /* if no filters are configured, do nothing */
+ if (!mvm->bcast_filters)
+ return false;
+
+ /* configure and attach these filters for each associated sta vif */
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_bcast_filter_iterator, &iter_data);
+
+ return true;
+}
+
+static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_bcast_filter_cmd cmd;
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
+ return 0;
+
+ if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
+ return 0;
+
+ return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+}
+#else
+static inline int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif)
+{
+ return 0;
+}
+#endif
+
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
@@ -928,6 +1208,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
iwl_mvm_sf_update(mvm, vif, false);
iwl_mvm_power_vif_assoc(mvm, vif);
+ if (vif->p2p)
+ iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT);
} else if (mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
/*
* If update fails - SF might be running in associated
@@ -940,27 +1222,25 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
ret = iwl_mvm_rm_sta_id(mvm, vif, mvmvif->ap_sta_id);
if (ret)
IWL_ERR(mvm, "failed to remove AP station\n");
+
+ if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
/* remove quota for this interface */
ret = iwl_mvm_update_quotas(mvm, NULL);
if (ret)
IWL_ERR(mvm, "failed to update quotas\n");
+
+ if (vif->p2p)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT);
}
iwl_mvm_recalc_multicast(mvm);
+ iwl_mvm_configure_bcast_filter(mvm, vif);
/* reset rssi values */
mvmvif->bf_data.ave_beacon_signal = 0;
- if (!(mvm->fw->ucode_capa.flags &
- IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)) {
- /* Workaround for FW bug, otherwise FW disables device
- * power save upon disassociation
- */
- ret = iwl_mvm_power_update_mode(mvm, vif);
- if (ret)
- IWL_ERR(mvm, "failed to update power mode\n");
- }
iwl_mvm_bt_coex_vif_change(mvm);
iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT,
IEEE80211_SMPS_AUTOMATIC);
@@ -973,7 +1253,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
&mvmvif->time_event_data);
} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
BSS_CHANGED_QOS)) {
- ret = iwl_mvm_power_update_mode(mvm, vif);
+ ret = iwl_mvm_power_update_mac(mvm, vif);
if (ret)
IWL_ERR(mvm, "failed to update power mode\n");
}
@@ -987,10 +1267,15 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
IWL_DEBUG_MAC80211(mvm, "cqm info_changed");
/* reset cqm events tracking */
mvmvif->bf_data.last_cqm_event = 0;
- ret = iwl_mvm_update_beacon_filter(mvm, vif);
+ ret = iwl_mvm_update_beacon_filter(mvm, vif, false, CMD_SYNC);
if (ret)
IWL_ERR(mvm, "failed to update CQM thresholds\n");
}
+
+ if (changes & BSS_CHANGED_ARP_FILTER) {
+ IWL_DEBUG_MAC80211(mvm, "arp filter changed");
+ iwl_mvm_configure_bcast_filter(mvm, vif);
+ }
}
static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
@@ -1024,8 +1309,6 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (ret)
goto out_remove;
- mvmvif->ap_ibss_active = true;
-
/* Send the bcast station. At this stage the TBTT and DTIM time events
* are added and applied to the scheduler */
ret = iwl_mvm_send_bcast_sta(mvm, vif, &mvmvif->bcast_sta);
@@ -1037,7 +1320,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
/* power updated needs to be done before quotas */
mvm->bound_vif_cnt++;
- iwl_mvm_power_update_binding(mvm, vif, true);
+ iwl_mvm_power_update_mac(mvm, vif);
ret = iwl_mvm_update_quotas(mvm, vif);
if (ret)
@@ -1047,6 +1330,8 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS);
+
iwl_mvm_bt_coex_vif_change(mvm);
mutex_unlock(&mvm->mutex);
@@ -1054,7 +1339,7 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw,
out_quota_failed:
mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
mvmvif->ap_ibss_active = false;
iwl_mvm_send_rm_bcast_sta(mvm, &mvmvif->bcast_sta);
out_unbind:
@@ -1080,6 +1365,8 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_bt_coex_vif_change(mvm);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS);
+
/* Need to update the P2P Device MAC (only GO, IBSS is single vif) */
if (vif->p2p && mvm->p2p_device_vif)
iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif);
@@ -1089,7 +1376,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw,
iwl_mvm_binding_remove_vif(mvm, vif);
mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
iwl_mvm_mac_ctxt_remove(mvm, vif);
@@ -1103,26 +1390,20 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
u32 changes)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- enum ieee80211_bss_change ht_change = BSS_CHANGED_ERP_CTS_PROT |
- BSS_CHANGED_HT |
- BSS_CHANGED_BANDWIDTH;
- int ret;
/* Changes will be applied when the AP/IBSS is started */
if (!mvmvif->ap_ibss_active)
return;
- if (changes & ht_change) {
- ret = iwl_mvm_mac_ctxt_changed(mvm, vif);
- if (ret)
- IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
- }
+ if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT |
+ BSS_CHANGED_BANDWIDTH) &&
+ iwl_mvm_mac_ctxt_changed(mvm, vif))
+ IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr);
/* Need to send a new beacon template to the FW */
- if (changes & BSS_CHANGED_BEACON) {
- if (iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
- IWL_WARN(mvm, "Failed updating beacon data\n");
- }
+ if (changes & BSS_CHANGED_BEACON &&
+ iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
+ IWL_WARN(mvm, "Failed updating beacon data\n");
}
static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -1155,6 +1436,8 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
struct cfg80211_scan_request *req)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+ struct iwl_notification_wait wait_scan_done;
+ static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
int ret;
if (req->n_channels == 0 || req->n_channels > MAX_NUM_SCAN_CHANNELS)
@@ -1162,11 +1445,38 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
- if (mvm->scan_status == IWL_MVM_SCAN_NONE)
- ret = iwl_mvm_scan_request(mvm, vif, req);
- else
+ switch (mvm->scan_status) {
+ case IWL_MVM_SCAN_SCHED:
+ iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
+ scan_done_notif,
+ ARRAY_SIZE(scan_done_notif),
+ NULL, NULL);
+ iwl_mvm_sched_scan_stop(mvm);
+ ret = iwl_wait_notification(&mvm->notif_wait,
+ &wait_scan_done, HZ);
+ if (ret) {
+ ret = -EBUSY;
+ goto out;
+ }
+ /* iwl_mvm_rx_scan_offload_complete_notif() will be called
+ * soon but will not reset the scan status as it won't be
+ * IWL_MVM_SCAN_SCHED any more since we queue the next scan
+ * immediately (below)
+ */
+ break;
+ case IWL_MVM_SCAN_NONE:
+ break;
+ default:
ret = -EBUSY;
+ goto out;
+ }
+
+ iwl_mvm_ref(mvm, IWL_MVM_REF_SCAN);
+ ret = iwl_mvm_scan_request(mvm, vif, req);
+ if (ret)
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+out:
mutex_unlock(&mvm->mutex);
return ret;
@@ -1186,20 +1496,32 @@ static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw,
static void
iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta, u16 tid,
+ struct ieee80211_sta *sta, u16 tids,
int num_frames,
enum ieee80211_frame_release_type reason,
bool more_data)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
- /* TODO: how do we tell the fw to send frames for a specific TID */
+ /* Called when we need to transmit (a) frame(s) from mac80211 */
- /*
- * The fw will send EOSP notification when the last frame will be
- * transmitted.
- */
- iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames);
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+ tids, more_data, false);
+}
+
+static void
+iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta, u16 tids,
+ int num_frames,
+ enum ieee80211_frame_release_type reason,
+ bool more_data)
+{
+ struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
+
+ /* Called when we need to transmit (a) frame(s) from agg queue */
+
+ iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames,
+ tids, more_data, true);
}
static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
@@ -1209,11 +1531,25 @@ static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
+ int tid;
switch (cmd) {
case STA_NOTIFY_SLEEP:
if (atomic_read(&mvm->pending_frames[mvmsta->sta_id]) > 0)
ieee80211_sta_block_awake(hw, sta, true);
+ spin_lock_bh(&mvmsta->lock);
+ for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
+ struct iwl_mvm_tid_data *tid_data;
+
+ tid_data = &mvmsta->tid_data[tid];
+ if (tid_data->state != IWL_AGG_ON &&
+ tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA)
+ continue;
+ if (iwl_mvm_tid_queued(tid_data) == 0)
+ continue;
+ ieee80211_sta_set_buffered(sta, tid, true);
+ }
+ spin_unlock_bh(&mvmsta->lock);
/*
* The fw updates the STA to be asleep. Tx packets on the Tx
* queues to this station will not be transmitted. The fw will
@@ -1304,12 +1640,12 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
/* enable beacon filtering */
- WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif));
+ WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
/* disable beacon filtering */
- WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif));
+ WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC));
ret = 0;
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
@@ -1774,7 +2110,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
* otherwise fw will complain.
*/
mvm->bound_vif_cnt++;
- iwl_mvm_power_update_binding(mvm, vif, true);
+ iwl_mvm_power_update_mac(mvm, vif);
/* Setting the quota at this stage is only required for monitor
* interfaces. For the other types, the bss_info changed flow
@@ -1792,7 +2128,7 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw,
out_remove_binding:
iwl_mvm_binding_remove_vif(mvm, vif);
mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
out_unlock:
mutex_unlock(&mvm->mutex);
if (ret)
@@ -1825,7 +2161,7 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw,
iwl_mvm_binding_remove_vif(mvm, vif);
mvm->bound_vif_cnt--;
- iwl_mvm_power_update_binding(mvm, vif, false);
+ iwl_mvm_power_update_mac(mvm, vif);
out_unlock:
mvmvif->phy_ctxt = NULL;
@@ -1892,8 +2228,9 @@ static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm,
return -EINVAL;
if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]))
- return iwl_mvm_enable_beacon_filter(mvm, vif);
- return iwl_mvm_disable_beacon_filter(mvm, vif);
+ return iwl_mvm_enable_beacon_filter(mvm, vif,
+ CMD_SYNC);
+ return iwl_mvm_disable_beacon_filter(mvm, vif, CMD_SYNC);
}
return -EOPNOTSUPP;
@@ -1932,6 +2269,7 @@ struct ieee80211_ops iwl_mvm_hw_ops = {
.sta_state = iwl_mvm_mac_sta_state,
.sta_notify = iwl_mvm_mac_sta_notify,
.allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames,
+ .release_buffered_frames = iwl_mvm_mac_release_buffered_frames,
.set_rts_threshold = iwl_mvm_mac_set_rts_threshold,
.sta_rc_update = iwl_mvm_sta_rc_update,
.conf_tx = iwl_mvm_mac_conf_tx,
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index 2b0ba1f..ad0315a 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -92,7 +92,6 @@ enum iwl_mvm_tx_fifo {
};
extern struct ieee80211_ops iwl_mvm_hw_ops;
-extern const struct iwl_mvm_power_ops pm_legacy_ops;
extern const struct iwl_mvm_power_ops pm_mac_ops;
/**
@@ -159,20 +158,6 @@ enum iwl_power_scheme {
IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
#define IWL_UAPSD_MAX_SP IEEE80211_WMM_IE_STA_QOSINFO_SP_2
-struct iwl_mvm_power_ops {
- int (*power_update_mode)(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
- int (*power_update_device_mode)(struct iwl_mvm *mvm);
- int (*power_disable)(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
- void (*power_update_binding)(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, bool assign);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- int (*power_dbgfs_read)(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
- char *buf, int bufsz);
-#endif
-};
-
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
enum iwl_dbgfs_pm_mask {
MVM_DEBUGFS_PM_KEEP_ALIVE = BIT(0),
@@ -239,6 +224,17 @@ enum iwl_mvm_smps_type_request {
NUM_IWL_MVM_SMPS_REQ,
};
+enum iwl_mvm_ref_type {
+ IWL_MVM_REF_UCODE_DOWN,
+ IWL_MVM_REF_SCAN,
+ IWL_MVM_REF_ROC,
+ IWL_MVM_REF_P2P_CLIENT,
+ IWL_MVM_REF_AP_IBSS,
+ IWL_MVM_REF_USER,
+
+ IWL_MVM_REF_COUNT,
+};
+
/**
* struct iwl_mvm_vif_bf_data - beacon filtering related data
* @bf_enabled: indicates if beacon filtering is enabled
@@ -269,7 +265,9 @@ struct iwl_mvm_vif_bf_data {
* @ap_ibss_active: indicates that AP/IBSS is configured and that the interface
* should get quota etc.
* @monitor_active: indicates that monitor context is configured, and that the
- * interface should get quota etc.
+ * interface should get quota etc.
+ * @low_latency: indicates that this interface is in low-latency mode
+ * (VMACLowLatencyMode)
* @queue_params: QoS params for this MAC
* @bcast_sta: station used for broadcast packets. Used by the following
* vifs: P2P_DEVICE, GO and AP.
@@ -285,6 +283,7 @@ struct iwl_mvm_vif {
bool uploaded;
bool ap_ibss_active;
bool monitor_active;
+ bool low_latency;
struct iwl_mvm_vif_bf_data bf_data;
u32 ap_beacon_time;
@@ -333,14 +332,13 @@ struct iwl_mvm_vif {
struct dentry *dbgfs_slink;
struct iwl_dbgfs_pm dbgfs_pm;
struct iwl_dbgfs_bf dbgfs_bf;
+ struct iwl_mac_power_cmd mac_pwr_cmd;
#endif
enum ieee80211_smps_mode smps_requests[NUM_IWL_MVM_SMPS_REQ];
/* FW identified misbehaving AP */
u8 uapsd_misbehaving_bssid[ETH_ALEN];
-
- bool pm_prevented;
};
static inline struct iwl_mvm_vif *
@@ -415,6 +413,7 @@ struct iwl_tt_params {
* @ct_kill_exit: worker to exit thermal kill
* @dynamic_smps: Is thermal throttling enabled dynamic_smps?
* @tx_backoff: The current thremal throttling tx backoff in uSec.
+ * @min_backoff: The minimal tx backoff due to power restrictions
* @params: Parameters to configure the thermal throttling algorithm.
* @throttle: Is thermal throttling is active?
*/
@@ -422,6 +421,7 @@ struct iwl_mvm_tt_mgmt {
struct delayed_work ct_kill_exit;
bool dynamic_smps;
u32 tx_backoff;
+ u32 min_backoff;
const struct iwl_tt_params *params;
bool throttle;
};
@@ -457,6 +457,8 @@ struct iwl_mvm {
bool init_ucode_complete;
u32 error_event_table;
u32 log_event_table;
+ u32 umac_error_event_table;
+ bool support_umac_log;
u32 ampdu_ref;
@@ -470,7 +472,7 @@ struct iwl_mvm {
struct iwl_nvm_data *nvm_data;
/* NVM sections */
- struct iwl_nvm_section nvm_sections[NVM_NUM_OF_SECTIONS];
+ struct iwl_nvm_section nvm_sections[NVM_MAX_NUM_SECTIONS];
/* EEPROM MAC addresses */
struct mac_address addresses[IWL_MVM_MAX_ADDRESSES];
@@ -494,6 +496,17 @@ struct iwl_mvm {
/* rx chain antennas set through debugfs for the scan command */
u8 scan_rx_ant;
+#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
+ /* broadcast filters to configure for each associated station */
+ const struct iwl_fw_bcast_filter *bcast_filters;
+#ifdef CONFIG_IWLWIFI_DEBUGFS
+ struct {
+ u32 override; /* u32 for debugfs_create_bool */
+ struct iwl_bcast_filter_cmd cmd;
+ } dbgfs_bcast_filtering;
+#endif
+#endif
+
/* Internal station */
struct iwl_mvm_int_sta aux_sta;
@@ -526,6 +539,9 @@ struct iwl_mvm {
*/
unsigned long fw_key_table[BITS_TO_LONGS(STA_KEY_MAX_NUM)];
+ /* A bitmap of reference types taken by the driver. */
+ unsigned long ref_bitmap[BITS_TO_LONGS(IWL_MVM_REF_COUNT)];
+
u8 vif_count;
/* -1 for always, 0 for never, >0 for that many times */
@@ -548,6 +564,10 @@ struct iwl_mvm {
#endif
#endif
+ /* d0i3 */
+ u8 d0i3_ap_sta_id;
+ struct work_struct d0i3_exit_work;
+
/* BT-Coex */
u8 bt_kill_msk;
struct iwl_bt_coex_profile_notif last_bt_notif;
@@ -557,8 +577,6 @@ struct iwl_mvm {
struct iwl_mvm_tt_mgmt thermal_throttle;
s32 temperature; /* Celsius */
- const struct iwl_mvm_power_ops *pm_ops;
-
#ifdef CONFIG_NL80211_TESTMODE
u32 noa_duration;
struct ieee80211_vif *noa_vif;
@@ -572,7 +590,9 @@ struct iwl_mvm {
u8 bound_vif_cnt;
/* Indicate if device power save is allowed */
- bool ps_prevented;
+ bool ps_disabled;
+ /* Indicate if device power management is allowed */
+ bool pm_disabled;
};
/* Extract MVM priv from op_mode and _hw */
@@ -595,6 +615,24 @@ static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status);
}
+static inline struct iwl_mvm_sta *
+iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
+{
+ struct ieee80211_sta *sta;
+
+ if (sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))
+ return NULL;
+
+ sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
+ lockdep_is_held(&mvm->mutex));
+
+ /* This can happen if the station has been removed right now */
+ if (IS_ERR_OR_NULL(sta))
+ return NULL;
+
+ return iwl_mvm_sta_from_mac80211(sta);
+}
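A hedged sketch of the intended call pattern (the function name here is hypothetical): the lockdep_is_held() annotation above means callers must hold mvm->mutex around the lookup and any use of the returned station.

static void example_sta_lookup(struct iwl_mvm *mvm, u8 sta_id)
{
	struct iwl_mvm_sta *mvmsta;

	mutex_lock(&mvm->mutex);	/* satisfies the lockdep annotation */
	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (mvmsta)
		IWL_DEBUG_INFO(mvm, "station %d is present\n", sta_id);
	mutex_unlock(&mvm->mutex);
}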
+
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
@@ -661,6 +699,8 @@ int iwl_mvm_up(struct iwl_mvm *mvm);
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm);
int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm);
+bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm,
+ struct iwl_bcast_filter_cmd *cmd);
/*
* FW notifications / CMD responses handlers
@@ -773,48 +813,19 @@ iwl_mvm_vif_dbgfs_clean(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
/* rate scaling */
int iwl_mvm_send_lq_cmd(struct iwl_mvm *mvm, struct iwl_lq_cmd *lq, bool init);
-/* power managment */
-static inline int iwl_mvm_power_update_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- return mvm->pm_ops->power_update_mode(mvm, vif);
-}
-
-static inline int iwl_mvm_power_disable(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- return mvm->pm_ops->power_disable(mvm, vif);
-}
-
-static inline int iwl_mvm_power_update_device_mode(struct iwl_mvm *mvm)
-{
- if (mvm->pm_ops->power_update_device_mode)
- return mvm->pm_ops->power_update_device_mode(mvm);
- return 0;
-}
+/* power management */
+int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm);
-static inline void iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool assign)
-{
- if (mvm->pm_ops->power_update_binding)
- mvm->pm_ops->power_update_binding(mvm, vif, assign);
-}
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm);
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ char *buf, int bufsz);
void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif);
int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-static inline int iwl_mvm_power_dbgfs_read(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- char *buf, int bufsz)
-{
- return mvm->pm_ops->power_dbgfs_read(mvm, vif, buf, bufsz);
-}
-#endif
-
int iwl_mvm_leds_init(struct iwl_mvm *mvm);
void iwl_mvm_leds_exit(struct iwl_mvm *mvm);
@@ -841,6 +852,10 @@ iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
}
#endif
+/* D0i3 */
+void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
+
/* BT Coex */
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
int iwl_send_bt_init_conf(struct iwl_mvm *mvm);
@@ -854,6 +869,7 @@ u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
+int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
enum iwl_bt_kill_msk {
BT_KILL_MSK_DEFAULT,
@@ -875,25 +891,51 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
struct iwl_beacon_filter_cmd *cmd)
{}
#endif
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, u32 flags);
int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ u32 flags);
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
-int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd);
+ struct ieee80211_vif *vif,
+ u32 flags);
int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, bool enable);
int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif);
+ struct ieee80211_vif *vif,
+ bool force,
+ u32 flags);
/* SMPS */
void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum iwl_mvm_smps_type_request req_type,
enum ieee80211_smps_mode smps_request);
+/* Low latency */
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool value);
+/* get VMACLowLatencyMode */
+static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
+{
+ /*
+ * should this consider associated/active/... state?
+ *
+ * Normally low-latency should only be active on interfaces
+ * that are active, but at least with debugfs it can also be
+ * enabled on interfaces that aren't active. However, when
+ * interfaces aren't active they aren't added into the
+ * binding, so this has no real impact. For now, just return
+ * the current desired low-latency state.
+ */
+
+ return mvmvif->low_latency;
+}
+
/* Thermal management and CT-kill */
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff);
void iwl_mvm_tt_handler(struct iwl_mvm *mvm);
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm);
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff);
void iwl_mvm_tt_exit(struct iwl_mvm *mvm);
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state);
diff --git a/drivers/net/wireless/iwlwifi/mvm/nvm.c b/drivers/net/wireless/iwlwifi/mvm/nvm.c
index 35b71af..2d5251b 100644
--- a/drivers/net/wireless/iwlwifi/mvm/nvm.c
+++ b/drivers/net/wireless/iwlwifi/mvm/nvm.c
@@ -67,14 +67,6 @@
#include "iwl-eeprom-read.h"
#include "iwl-nvm-parse.h"
-/* list of NVM sections we are allowed/need to read */
-static const int nvm_to_read[] = {
- NVM_SECTION_TYPE_HW,
- NVM_SECTION_TYPE_SW,
- NVM_SECTION_TYPE_CALIBRATION,
- NVM_SECTION_TYPE_PRODUCTION,
-};
-
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2*1024)
#define IWL_MAX_NVM_SECTION_SIZE 7000
@@ -240,7 +232,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
/* Checking for required sections */
if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
- !mvm->nvm_sections[NVM_SECTION_TYPE_HW].data) {
+ !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
IWL_ERR(mvm, "Can't parse empty NVM sections\n");
return NULL;
}
@@ -248,7 +240,7 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm)
if (WARN_ON(!mvm->cfg))
return NULL;
- hw = (const __le16 *)sections[NVM_SECTION_TYPE_HW].data;
+ hw = (const __le16 *)sections[mvm->cfg->nvm_hw_section_num].data;
sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
return iwl_parse_nvm_data(mvm->trans->dev, mvm->cfg, hw, sw, calib,
@@ -367,7 +359,7 @@ static int iwl_mvm_read_external_nvm(struct iwl_mvm *mvm)
break;
}
- if (WARN(section_id >= NVM_NUM_OF_SECTIONS,
+ if (WARN(section_id >= NVM_MAX_NUM_SECTIONS,
"Invalid NVM section ID %d\n", section_id)) {
ret = -EINVAL;
break;
@@ -415,6 +407,9 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
int ret, i, section;
u8 *nvm_buffer, *temp;
+ if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
+ return -EINVAL;
+
/* load external NVM if configured */
if (iwlwifi_mod_params.nvm_file) {
/* move to External NVM flow */
@@ -422,6 +417,14 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
if (ret)
return ret;
} else {
+ /* list of NVM sections we are allowed/need to read */
+ int nvm_to_read[] = {
+ mvm->cfg->nvm_hw_section_num,
+ NVM_SECTION_TYPE_SW,
+ NVM_SECTION_TYPE_CALIBRATION,
+ NVM_SECTION_TYPE_PRODUCTION,
+ };
+
/* Read From FW NVM */
IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");
@@ -446,10 +449,6 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
#ifdef CONFIG_IWLWIFI_DEBUGFS
switch (section) {
- case NVM_SECTION_TYPE_HW:
- mvm->nvm_hw_blob.data = temp;
- mvm->nvm_hw_blob.size = ret;
- break;
case NVM_SECTION_TYPE_SW:
mvm->nvm_sw_blob.data = temp;
mvm->nvm_sw_blob.size = ret;
@@ -463,6 +462,11 @@ int iwl_nvm_init(struct iwl_mvm *mvm)
mvm->nvm_prod_blob.size = ret;
break;
default:
+ if (section == mvm->cfg->nvm_hw_section_num) {
+ mvm->nvm_hw_blob.data = temp;
+ mvm->nvm_hw_blob.size = ret;
+ break;
+ }
WARN(1, "section: %d", section);
}
#endif
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index a3d43de..a46f0b8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -185,9 +185,10 @@ static void iwl_mvm_nic_config(struct iwl_op_mode *op_mode)
* (PCIe power is lost before PERST# is asserted), causing ME FW
* to lose ownership and not being able to obtain it back.
*/
- iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
- ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+ if (mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_set_bits_mask_prph(mvm->trans, APMG_PS_CTRL_REG,
+ APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+ ~APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
}
struct iwl_rx_handlers {
@@ -222,10 +223,12 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
+ RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
+
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
- iwl_mvm_rx_scan_offload_complete_notif, false),
+ iwl_mvm_rx_scan_offload_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
false),
@@ -284,9 +287,11 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BEACON_NOTIFICATION),
CMD(BEACON_TEMPLATE_CMD),
CMD(STATISTICS_NOTIFICATION),
+ CMD(EOSP_NOTIFICATION),
CMD(REDUCE_TX_POWER_CMD),
CMD(TX_ANT_CONFIGURATION_CMD),
CMD(D3_CONFIG_CMD),
+ CMD(D0I3_END_CMD),
CMD(PROT_OFFLOAD_CONFIG_CMD),
CMD(OFFLOADS_QUERY_CMD),
CMD(REMOTE_WAKE_CONFIG_CMD),
@@ -309,6 +314,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(BT_PROFILE_NOTIFICATION),
CMD(BT_CONFIG),
CMD(MCAST_FILTER_CMD),
+ CMD(BCAST_FILTER_CMD),
CMD(REPLY_SF_CFG_CMD),
CMD(REPLY_BEACON_FILTERING_CMD),
CMD(REPLY_THERMAL_MNG_BACKOFF),
@@ -320,6 +326,24 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
/* this forward declaration avoids having to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk);
+
+static u32 calc_min_backoff(struct iwl_trans *trans, const struct iwl_cfg *cfg)
+{
+ const struct iwl_pwr_tx_backoff *pwr_tx_backoff = cfg->pwr_tx_backoffs;
+
+ if (!pwr_tx_backoff)
+ return 0;
+
+ while (pwr_tx_backoff->pwr) {
+ if (trans->dflt_pwr_limit >= pwr_tx_backoff->pwr)
+ return pwr_tx_backoff->backoff;
+
+ pwr_tx_backoff++;
+ }
+
+ return 0;
+}
static struct iwl_op_mode *
iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
@@ -333,6 +357,14 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
TX_CMD,
};
int err, scan_size;
+ u32 min_backoff;
+
+ /*
+ * We use IWL_MVM_STATION_COUNT to check the validity of the station
+ * index all over the driver - check that its value corresponds to the
+ * array size.
+ */
+ BUILD_BUG_ON(ARRAY_SIZE(mvm->fw_id_to_mac_id) != IWL_MVM_STATION_COUNT);
/********************************
* 1. Allocating and configuring HW data
@@ -373,6 +405,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk);
INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk);
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
+ INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
@@ -421,7 +454,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
IWL_INFO(mvm, "Detected %s, REV=0x%X\n",
mvm->cfg->name, mvm->trans->hw_rev);
- iwl_mvm_tt_initialize(mvm);
+ min_backoff = calc_min_backoff(trans, cfg);
+ iwl_mvm_tt_initialize(mvm, min_backoff);
/*
* If the NVM exists in an external file,
@@ -462,13 +496,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
if (err)
goto out_unregister;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)
- mvm->pm_ops = &pm_mac_ops;
- else
- mvm->pm_ops = &pm_legacy_ops;
-
memset(&mvm->rx_stats, 0, sizeof(struct mvm_statistics_rx));
+ /* rpm starts with a taken ref. only set the appropriate bit here. */
+ set_bit(IWL_MVM_REF_UCODE_DOWN, mvm->ref_bitmap);
+
return op_mode;
out_unregister:
@@ -508,7 +540,7 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
mvm->phy_db = NULL;
iwl_free_nvm_data(mvm->nvm_data);
- for (i = 0; i < NVM_NUM_OF_SECTIONS; i++)
+ for (i = 0; i < NVM_MAX_NUM_SECTIONS; i++)
kfree(mvm->nvm_sections[i].data);
ieee80211_free_hw(mvm->hw);
@@ -703,6 +735,29 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
iwl_abort_notification_waits(&mvm->notif_wait);
/*
+ * This is a bit racy, but worst case we tell mac80211 about
+ * a stopped/aborted scan when that was already done, which
+ * is not a problem. It is necessary to abort any OS scan
+ * here because mac80211 requires having the scan cleared
+ * before restarting.
+ * We'll reset the scan_status to NONE in restart cleanup in
+ * the next start() call from mac80211. If restart isn't called
+ * (no fw restart) scan status will stay busy.
+ */
+ switch (mvm->scan_status) {
+ case IWL_MVM_SCAN_NONE:
+ break;
+ case IWL_MVM_SCAN_OS:
+ ieee80211_scan_completed(mvm->hw, true);
+ break;
+ case IWL_MVM_SCAN_SCHED:
+ /* Sched scan will be restarted by mac80211 in restart_hw. */
+ if (!mvm->restart_fw)
+ ieee80211_sched_scan_stopped(mvm->hw);
+ break;
+ }
+
+ /*
* If we're restarting already, don't cycle restarts.
* If INIT fw asserted, it will likely fail again.
* If WoWLAN fw asserted, don't restart either, mac80211
@@ -733,25 +788,8 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk);
schedule_work(&reprobe->work);
} else if (mvm->cur_ucode == IWL_UCODE_REGULAR && mvm->restart_fw) {
- /*
- * This is a bit racy, but worst case we tell mac80211 about
- * a stopped/aborted (sched) scan when that was already done
- * which is not a problem. It is necessary to abort any scan
- * here because mac80211 requires having the scan cleared
- * before restarting.
- * We'll reset the scan_status to NONE in restart cleanup in
- * the next start() call from mac80211.
- */
- switch (mvm->scan_status) {
- case IWL_MVM_SCAN_NONE:
- break;
- case IWL_MVM_SCAN_OS:
- ieee80211_scan_completed(mvm->hw, true);
- break;
- case IWL_MVM_SCAN_SCHED:
- ieee80211_sched_scan_stopped(mvm->hw);
- break;
- }
+ /* don't let the transport/FW power down */
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
if (mvm->restart_fw > 0)
mvm->restart_fw--;
@@ -778,6 +816,163 @@ static void iwl_mvm_cmd_queue_full(struct iwl_op_mode *op_mode)
iwl_mvm_nic_restart(mvm);
}
+struct iwl_d0i3_iter_data {
+ struct iwl_mvm *mvm;
+ u8 ap_sta_id;
+ u8 vif_count;
+};
+
+static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_d0i3_iter_data *data = _data;
+ struct iwl_mvm *mvm = data->mvm;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+
+ IWL_DEBUG_RPM(mvm, "entering D0i3 - vif %pM\n", vif->addr);
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ return;
+
+ iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
+
+ /*
+ * on init/association, mvm already configures POWER_TABLE_CMD
+ * and REPLY_MCAST_FILTER_CMD, so we currently don't
+ * reconfigure them (we might want to use different
+ * params later on, though).
+ */
+ data->ap_sta_id = mvmvif->ap_sta_id;
+ data->vif_count++;
+}
+
+static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE;
+ int ret;
+ struct iwl_d0i3_iter_data d0i3_iter_data = {
+ .mvm = mvm,
+ };
+ struct iwl_wowlan_config_cmd wowlan_config_cmd = {
+ .wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
+ IWL_WOWLAN_WAKEUP_BEACON_MISS |
+ IWL_WOWLAN_WAKEUP_LINK_CHANGE |
+ IWL_WOWLAN_WAKEUP_BCN_FILTERING),
+ };
+ struct iwl_d3_manager_config d3_cfg_cmd = {
+ .min_sleep_time = cpu_to_le32(1000),
+ };
+
+ IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_enter_d0i3_iterator,
+ &d0i3_iter_data);
+ if (d0i3_iter_data.vif_count == 1) {
+ mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
+ } else {
+ WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
+ }
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
+ sizeof(wowlan_config_cmd),
+ &wowlan_config_cmd);
+ if (ret)
+ return ret;
+
+ return iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD,
+ flags | CMD_MAKE_TRANS_IDLE,
+ sizeof(d3_cfg_cmd), &d3_cfg_cmd);
+}
+
+static void iwl_mvm_exit_d0i3_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = _data;
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO;
+
+ IWL_DEBUG_RPM(mvm, "exiting D0i3 - vif %pM\n", vif->addr);
+ if (vif->type != NL80211_IFTYPE_STATION ||
+ !vif->bss_conf.assoc)
+ return;
+
+ iwl_mvm_update_d0i3_power_mode(mvm, vif, false, flags);
+}
+
+static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
+ struct ieee80211_vif *vif)
+{
+ struct iwl_mvm *mvm = data;
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (vif->type == NL80211_IFTYPE_STATION && vif->bss_conf.assoc &&
+ mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id)
+ ieee80211_connection_loss(vif);
+}
+
+static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
+ struct iwl_host_cmd get_status_cmd = {
+ .id = WOWLAN_GET_STATUSES,
+ .flags = CMD_SYNC | CMD_HIGH_PRIO | CMD_WANT_SKB,
+ };
+ struct iwl_wowlan_status_v6 *status;
+ int ret;
+ u32 disconnection_reasons, wakeup_reasons;
+
+ mutex_lock(&mvm->mutex);
+ ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
+ if (ret)
+ goto out;
+
+ if (!get_status_cmd.resp_pkt)
+ goto out;
+
+ status = (void *)get_status_cmd.resp_pkt->data;
+ wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
+
+ IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
+
+ disconnection_reasons =
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_MISSED_BEACON |
+ IWL_WOWLAN_WAKEUP_BY_DISCONNECTION_ON_DEAUTH;
+ if (wakeup_reasons & disconnection_reasons)
+ ieee80211_iterate_active_interfaces(
+ mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_d0i3_disconnect_iter, mvm);
+
+ iwl_free_resp(&get_status_cmd);
+out:
+ mutex_unlock(&mvm->mutex);
+}
+
+static int iwl_mvm_exit_d0i3(struct iwl_op_mode *op_mode)
+{
+ struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
+ u32 flags = CMD_ASYNC | CMD_HIGH_PRIO | CMD_SEND_IN_IDLE |
+ CMD_WAKE_UP_TRANS;
+ int ret;
+
+ IWL_DEBUG_RPM(mvm, "MVM exiting D0i3\n");
+
+ ret = iwl_mvm_send_cmd_pdu(mvm, D0I3_END_CMD, flags, 0, NULL);
+ if (ret)
+ goto out;
+
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_exit_d0i3_iterator,
+ mvm);
+out:
+ schedule_work(&mvm->d0i3_exit_work);
+ return ret;
+}
+
static const struct iwl_op_mode_ops iwl_mvm_ops = {
.start = iwl_op_mode_mvm_start,
.stop = iwl_op_mode_mvm_stop,
@@ -789,4 +984,6 @@ static const struct iwl_op_mode_ops iwl_mvm_ops = {
.nic_error = iwl_mvm_nic_error,
.cmd_queue_full = iwl_mvm_cmd_queue_full,
.nic_config = iwl_mvm_nic_config,
+ .enter_d0i3 = iwl_mvm_enter_d0i3,
+ .exit_d0i3 = iwl_mvm_exit_d0i3,
};
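calc_min_backoff() above picks a Tx-power backoff by walking the per-device pwr_tx_backoffs table (terminated by an entry with .pwr == 0) and returning the backoff of the first entry whose threshold the platform's dflt_pwr_limit meets. Below is a minimal self-contained sketch of that lookup; the table thresholds are made up for illustration and are not any device's real values.

#include <stdio.h>

struct pwr_tx_backoff {
        unsigned int pwr;       /* power-limit threshold (zero terminates) */
        unsigned int backoff;   /* Tx backoff to apply at/below that limit */
};

/* same walk as calc_min_backoff(): first entry whose threshold fits wins */
static unsigned int min_backoff(const struct pwr_tx_backoff *tbl,
                                unsigned int dflt_pwr_limit)
{
        if (!tbl)
                return 0;
        for (; tbl->pwr; tbl++)
                if (dflt_pwr_limit >= tbl->pwr)
                        return tbl->backoff;
        return 0;
}

int main(void)
{
        /* hypothetical thresholds, highest first, purely for illustration */
        static const struct pwr_tx_backoff tbl[] = {
                { .pwr = 1600, .backoff = 0 },
                { .pwr = 1300, .backoff = 10 },
                { .pwr = 900,  .backoff = 20 },
                { /* terminator */ },
        };

        printf("%u\n", min_backoff(tbl, 1400)); /* -> 10 */
        printf("%u\n", min_backoff(tbl, 800));  /* -> 0, no entry fits */
        return 0;
}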
diff --git a/drivers/net/wireless/iwlwifi/mvm/power.c b/drivers/net/wireless/iwlwifi/mvm/power.c
index d9eab3b..4da1ea4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/power.c
+++ b/drivers/net/wireless/iwlwifi/mvm/power.c
@@ -74,39 +74,36 @@
#define POWER_KEEP_ALIVE_PERIOD_SEC 25
+static
int iwl_mvm_beacon_filter_send_cmd(struct iwl_mvm *mvm,
- struct iwl_beacon_filter_cmd *cmd)
+ struct iwl_beacon_filter_cmd *cmd,
+ u32 flags)
{
- int ret;
-
- ret = iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, CMD_SYNC,
- sizeof(struct iwl_beacon_filter_cmd), cmd);
-
- if (!ret) {
- IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
- le32_to_cpu(cmd->ba_enable_beacon_abort));
- IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
- le32_to_cpu(cmd->ba_escape_timer));
- IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
- le32_to_cpu(cmd->bf_debug_flag));
- IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
- le32_to_cpu(cmd->bf_enable_beacon_filter));
- IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
- le32_to_cpu(cmd->bf_energy_delta));
- IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
- le32_to_cpu(cmd->bf_escape_timer));
- IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
- le32_to_cpu(cmd->bf_roaming_energy_delta));
- IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
- le32_to_cpu(cmd->bf_roaming_state));
- IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
- le32_to_cpu(cmd->bf_temp_threshold));
- IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
- le32_to_cpu(cmd->bf_temp_fast_filter));
- IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
- le32_to_cpu(cmd->bf_temp_slow_filter));
- }
- return ret;
+ IWL_DEBUG_POWER(mvm, "ba_enable_beacon_abort is: %d\n",
+ le32_to_cpu(cmd->ba_enable_beacon_abort));
+ IWL_DEBUG_POWER(mvm, "ba_escape_timer is: %d\n",
+ le32_to_cpu(cmd->ba_escape_timer));
+ IWL_DEBUG_POWER(mvm, "bf_debug_flag is: %d\n",
+ le32_to_cpu(cmd->bf_debug_flag));
+ IWL_DEBUG_POWER(mvm, "bf_enable_beacon_filter is: %d\n",
+ le32_to_cpu(cmd->bf_enable_beacon_filter));
+ IWL_DEBUG_POWER(mvm, "bf_energy_delta is: %d\n",
+ le32_to_cpu(cmd->bf_energy_delta));
+ IWL_DEBUG_POWER(mvm, "bf_escape_timer is: %d\n",
+ le32_to_cpu(cmd->bf_escape_timer));
+ IWL_DEBUG_POWER(mvm, "bf_roaming_energy_delta is: %d\n",
+ le32_to_cpu(cmd->bf_roaming_energy_delta));
+ IWL_DEBUG_POWER(mvm, "bf_roaming_state is: %d\n",
+ le32_to_cpu(cmd->bf_roaming_state));
+ IWL_DEBUG_POWER(mvm, "bf_temp_threshold is: %d\n",
+ le32_to_cpu(cmd->bf_temp_threshold));
+ IWL_DEBUG_POWER(mvm, "bf_temp_fast_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_fast_filter));
+ IWL_DEBUG_POWER(mvm, "bf_temp_slow_filter is: %d\n",
+ le32_to_cpu(cmd->bf_temp_slow_filter));
+
+ return iwl_mvm_send_cmd_pdu(mvm, REPLY_BEACON_FILTERING_CMD, flags,
+ sizeof(struct iwl_beacon_filter_cmd), cmd);
}
static
@@ -145,7 +142,7 @@ int iwl_mvm_update_beacon_abort(struct iwl_mvm *mvm,
mvmvif->bf_data.ba_enabled = enable;
iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+ return iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, CMD_SYNC);
}
static void iwl_mvm_power_log(struct iwl_mvm *mvm,
@@ -301,8 +298,7 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
cmd->keep_alive_seconds = cpu_to_le16(keep_alive);
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
- mvm->ps_prevented)
+ if (mvm->ps_disabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
@@ -312,7 +308,8 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
mvmvif->dbgfs_pm.disable_power_off)
cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
#endif
- if (!vif->bss_conf.ps || mvmvif->pm_prevented)
+ if (!vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif) ||
+ mvm->pm_disabled)
return;
cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
@@ -419,11 +416,9 @@ static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
#endif /* CONFIG_IWLWIFI_DEBUGFS */
}
-static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
+static int iwl_mvm_power_send_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
- int ret;
- bool ba_enable;
struct iwl_mac_power_cmd cmd = {};
if (vif->type != NL80211_IFTYPE_STATION)
@@ -435,56 +430,30 @@ static int iwl_mvm_power_mac_update_mode(struct iwl_mvm *mvm,
iwl_mvm_power_build_cmd(mvm, vif, &cmd);
iwl_mvm_power_log(mvm, &cmd);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
- sizeof(cmd), &cmd);
- if (ret)
- return ret;
-
- ba_enable = !!(cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
-
- return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
-}
-
-static int iwl_mvm_power_mac_disable(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_mac_power_cmd cmd = {};
- struct iwl_mvm_vif *mvmvif __maybe_unused =
- iwl_mvm_vif_from_mac80211(vif);
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return 0;
-
- cmd.id_and_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id,
- mvmvif->color));
-
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
- cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-
#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
+ memcpy(&iwl_mvm_vif_from_mac80211(vif)->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif
- iwl_mvm_power_log(mvm, &cmd);
- return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_ASYNC,
+ return iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, CMD_SYNC,
sizeof(cmd), &cmd);
}
-static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
+int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
{
struct iwl_device_power_cmd cmd = {
.flags = cpu_to_le16(DEVICE_POWER_FLAGS_POWER_SAVE_ENA_MSK),
};
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
+ return 0;
+
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
return 0;
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM ||
- force_disable)
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
+ mvm->ps_disabled = true;
+
+ if (mvm->ps_disabled)
cmd.flags |= cpu_to_le16(DEVICE_POWER_FLAGS_CAM_MSK);
#ifdef CONFIG_IWLWIFI_DEBUGFS
@@ -501,11 +470,6 @@ static int _iwl_mvm_power_update_device(struct iwl_mvm *mvm, bool force_disable)
&cmd);
}
-static int iwl_mvm_power_update_device(struct iwl_mvm *mvm)
-{
- return _iwl_mvm_power_update_device(mvm, false);
-}
-
void iwl_mvm_power_vif_assoc(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -544,44 +508,137 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
return 0;
}
-static void iwl_mvm_power_binding_iterator(void *_data, u8 *mac,
- struct ieee80211_vif *vif)
+struct iwl_power_constraint {
+ struct ieee80211_vif *bf_vif;
+ struct ieee80211_vif *bss_vif;
+ bool pm_disabled;
+ bool ps_disabled;
+};
+
+static void iwl_mvm_power_iterator(void *_data, u8 *mac,
+ struct ieee80211_vif *vif)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_mvm *mvm = _data;
- int ret;
+ struct iwl_power_constraint *power_iterator = _data;
+
+ switch (ieee80211_vif_type_p2p(vif)) {
+ case NL80211_IFTYPE_P2P_DEVICE:
+ break;
+
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_AP:
+ /* no BSS power mgmt if we have an active AP */
+ if (mvmvif->ap_ibss_active)
+ power_iterator->pm_disabled = true;
+ break;
+
+ case NL80211_IFTYPE_MONITOR:
+ /* no BSS power mgmt and no device power save */
+ power_iterator->pm_disabled = true;
+ power_iterator->ps_disabled = true;
+ break;
+
+ case NL80211_IFTYPE_P2P_CLIENT:
+ /* no BSS power mgmt if we have a P2P client */
+ power_iterator->pm_disabled = true;
+ break;
+
+ case NL80211_IFTYPE_STATION:
+ /* we should have only one BSS vif */
+ WARN_ON(power_iterator->bss_vif);
+ power_iterator->bss_vif = vif;
+
+ if (mvmvif->bf_data.bf_enabled &&
+ !WARN_ON(power_iterator->bf_vif))
+ power_iterator->bf_vif = vif;
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void
+iwl_mvm_power_get_global_constraint(struct iwl_mvm *mvm,
+ struct iwl_power_constraint *constraint)
+{
+ lockdep_assert_held(&mvm->mutex);
+
+ if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM) {
+ constraint->pm_disabled = true;
+ constraint->ps_disabled = true;
+ }
- mvmvif->pm_prevented = (mvm->bound_vif_cnt <= 1) ? false : true;
+ ieee80211_iterate_active_interfaces_atomic(mvm->hw,
+ IEEE80211_IFACE_ITER_NORMAL,
+ iwl_mvm_power_iterator, constraint);
- ret = iwl_mvm_power_mac_update_mode(mvm, vif);
- WARN_ONCE(ret, "Failed to update power parameters on a specific vif\n");
+ /* TODO: remove this and determine this variable in the iterator */
+ if (mvm->bound_vif_cnt > 1)
+ constraint->pm_disabled = true;
}
-static void _iwl_mvm_power_update_binding(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- bool assign)
+int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_power_constraint constraint = {};
+ bool ba_enable;
+ int ret;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT))
+ return 0;
+
+ iwl_mvm_power_get_global_constraint(mvm, &constraint);
+ mvm->ps_disabled = constraint.ps_disabled;
+ mvm->pm_disabled = constraint.pm_disabled;
+
+ /* don't update device power state unless we add / remove monitor */
if (vif->type == NL80211_IFTYPE_MONITOR) {
- int ret = _iwl_mvm_power_update_device(mvm, assign);
- mvm->ps_prevented = assign;
- WARN_ONCE(ret, "Failed to update power device state\n");
+ ret = iwl_mvm_power_update_device(mvm);
+ if (ret)
+ return ret;
}
- ieee80211_iterate_active_interfaces(mvm->hw,
- IEEE80211_IFACE_ITER_NORMAL,
- iwl_mvm_power_binding_iterator,
- mvm);
+ ret = iwl_mvm_power_send_cmd(mvm, vif);
+ if (ret)
+ return ret;
+
+ if (constraint.bss_vif && vif != constraint.bss_vif) {
+ ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
+ if (ret)
+ return ret;
+ }
+
+ if (!constraint.bf_vif)
+ return 0;
+
+ vif = constraint.bf_vif;
+ mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ ba_enable = !(constraint.pm_disabled || constraint.ps_disabled ||
+ !vif->bss_conf.ps || iwl_mvm_vif_low_latency(mvmvif));
+
+ return iwl_mvm_update_beacon_abort(mvm, constraint.bf_vif, ba_enable);
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
-static int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, char *buf,
- int bufsz)
+int iwl_mvm_power_mac_dbgfs_read(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif, char *buf,
+ int bufsz)
{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_mac_power_cmd cmd = {};
int pos = 0;
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+ if (WARN_ON(!(mvm->fw->ucode_capa.flags &
+ IWL_UCODE_TLV_FLAGS_PM_CMD_SUPPORT)))
+ return 0;
+
+ mutex_lock(&mvm->mutex);
+ memcpy(&cmd, &mvmvif->mac_pwr_cmd, sizeof(cmd));
+ mutex_unlock(&mvm->mutex);
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD))
pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
@@ -685,32 +742,46 @@ iwl_mvm_beacon_filter_debugfs_parameters(struct ieee80211_vif *vif,
}
#endif
-int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+static int _iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ struct iwl_beacon_filter_cmd *cmd,
+ u32 cmd_flags,
+ bool d0i3)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
- struct iwl_beacon_filter_cmd cmd = {
- IWL_BF_CMD_CONFIG_DEFAULTS,
- .bf_enable_beacon_filter = cpu_to_le32(1),
- };
int ret;
if (mvmvif != mvm->bf_allowed_vif ||
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, &cmd);
- iwl_mvm_beacon_filter_debugfs_parameters(vif, &cmd);
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+ iwl_mvm_beacon_filter_set_cqm_params(mvm, vif, cmd);
+ if (!d0i3)
+ iwl_mvm_beacon_filter_debugfs_parameters(vif, cmd);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, cmd, cmd_flags);
- if (!ret)
+ /* don't change bf_enabled in case of temporary d0i3 configuration */
+ if (!ret && !d0i3)
mvmvif->bf_data.bf_enabled = true;
return ret;
}
+int iwl_mvm_enable_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ u32 flags)
+{
+ struct iwl_beacon_filter_cmd cmd = {
+ IWL_BF_CMD_CONFIG_DEFAULTS,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+
+ return _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd, flags, false);
+}
+
int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+ struct ieee80211_vif *vif,
+ u32 flags)
{
struct iwl_beacon_filter_cmd cmd = {};
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
@@ -720,7 +791,7 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd);
+ ret = iwl_mvm_beacon_filter_send_cmd(mvm, &cmd, flags);
if (!ret)
mvmvif->bf_data.bf_enabled = false;
@@ -728,23 +799,89 @@ int iwl_mvm_disable_beacon_filter(struct iwl_mvm *mvm,
return ret;
}
-int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
+int iwl_mvm_update_d0i3_power_mode(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool enable, u32 flags)
{
+ int ret;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ struct iwl_mac_power_cmd cmd = {};
- if (!mvmvif->bf_data.bf_enabled)
+ if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
return 0;
- return iwl_mvm_enable_beacon_filter(mvm, vif);
-}
+ if (!vif->bss_conf.assoc)
+ return 0;
-const struct iwl_mvm_power_ops pm_mac_ops = {
- .power_update_mode = iwl_mvm_power_mac_update_mode,
- .power_update_device_mode = iwl_mvm_power_update_device,
- .power_disable = iwl_mvm_power_mac_disable,
- .power_update_binding = _iwl_mvm_power_update_binding,
+ iwl_mvm_power_build_cmd(mvm, vif, &cmd);
+ if (enable) {
+ /* configure skip over dtim up to 300 msec */
+ int dtimper = mvm->hw->conf.ps_dtim_period ?: 1;
+ int dtimper_msec = dtimper * vif->bss_conf.beacon_int;
+
+ if (WARN_ON(!dtimper_msec))
+ return 0;
+
+ cmd.flags |=
+ cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
+ cmd.skip_dtim_periods = 300 / dtimper_msec;
+ }
+ iwl_mvm_power_log(mvm, &cmd);
#ifdef CONFIG_IWLWIFI_DEBUGFS
- .power_dbgfs_read = iwl_mvm_power_mac_dbgfs_read,
+ memcpy(&mvmvif->mac_pwr_cmd, &cmd, sizeof(cmd));
#endif
-};
+ ret = iwl_mvm_send_cmd_pdu(mvm, MAC_PM_POWER_TABLE, flags,
+ sizeof(cmd), &cmd);
+ if (ret)
+ return ret;
+
+ /* configure beacon filtering */
+ if (mvmvif != mvm->bf_allowed_vif)
+ return 0;
+
+ if (enable) {
+ struct iwl_beacon_filter_cmd cmd_bf = {
+ IWL_BF_CMD_CONFIG_D0I3,
+ .bf_enable_beacon_filter = cpu_to_le32(1),
+ };
+ ret = _iwl_mvm_enable_beacon_filter(mvm, vif, &cmd_bf,
+ flags, true);
+ } else {
+ if (mvmvif->bf_data.bf_enabled)
+ ret = iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+ else
+ ret = iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ }
+
+ return ret;
+}
+
+int iwl_mvm_update_beacon_filter(struct iwl_mvm *mvm,
+ struct ieee80211_vif *vif,
+ bool force,
+ u32 flags)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+
+ if (mvmvif != mvm->bf_allowed_vif)
+ return 0;
+
+ if (!mvmvif->bf_data.bf_enabled) {
+ /* disable beacon filtering explicitly if force is true */
+ if (force)
+ return iwl_mvm_disable_beacon_filter(mvm, vif, flags);
+ return 0;
+ }
+
+ return iwl_mvm_enable_beacon_filter(mvm, vif, flags);
+}
+
+int iwl_power_legacy_set_cam_mode(struct iwl_mvm *mvm)
+{
+ struct iwl_powertable_cmd cmd = {
+ .keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC,
+ };
+
+ return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
+ sizeof(cmd), &cmd);
+}
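iwl_mvm_update_d0i3_power_mode() above caps DTIM skipping at roughly 300 msec of beacon time, computing skip_dtim_periods = 300 / (dtim_period * beacon_int). A small worked sketch of that arithmetic follows; the input values are illustrative only.

#include <stdio.h>

/* mirror of the skip-over-DTIM cap applied when entering D0i3 */
static int skip_dtim_periods(int dtim_period, int beacon_int)
{
        int dtimper = dtim_period ? dtim_period : 1;    /* driver defaults 0 to 1 */
        int dtimper_msec = dtimper * beacon_int;

        if (!dtimper_msec)
                return 0;               /* guarded by WARN_ON in the driver */
        return 300 / dtimper_msec;      /* stay under ~300 msec of skipped beacons */
}

int main(void)
{
        /* beacon interval 100, DTIM every 3 beacons -> skip 1 DTIM period */
        printf("%d\n", skip_dtim_periods(3, 100));
        /* DTIM every beacon -> up to 3 DTIM periods may be skipped */
        printf("%d\n", skip_dtim_periods(1, 100));
        return 0;
}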
diff --git a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c b/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
deleted file mode 100644
index ef712ae..0000000
--- a/drivers/net/wireless/iwlwifi/mvm/power_legacy.c
+++ /dev/null
@@ -1,319 +0,0 @@
-/******************************************************************************
- *
- * This file is provided under a dual BSD/GPLv2 license. When using or
- * redistributing this file, you may do so under either license.
- *
- * GPL LICENSE SUMMARY
- *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
- * USA
- *
- * The full GNU General Public License is included in this distribution
- * in the file called COPYING.
- *
- * Contact Information:
- * Intel Linux Wireless <ilw@linux.intel.com>
- * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
- *
- * BSD LICENSE
- *
- * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *****************************************************************************/
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-
-#include <net/mac80211.h>
-
-#include "iwl-debug.h"
-#include "mvm.h"
-#include "iwl-modparams.h"
-#include "fw-api-power.h"
-
-#define POWER_KEEP_ALIVE_PERIOD_SEC 25
-
-static void iwl_mvm_power_log(struct iwl_mvm *mvm,
- struct iwl_powertable_cmd *cmd)
-{
- IWL_DEBUG_POWER(mvm,
- "Sending power table command for power level %d, flags = 0x%X\n",
- iwlmvm_mod_params.power_scheme,
- le16_to_cpu(cmd->flags));
- IWL_DEBUG_POWER(mvm, "Keep alive = %u sec\n", cmd->keep_alive_seconds);
-
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- IWL_DEBUG_POWER(mvm, "Rx timeout = %u usec\n",
- le32_to_cpu(cmd->rx_data_timeout));
- IWL_DEBUG_POWER(mvm, "Tx timeout = %u usec\n",
- le32_to_cpu(cmd->tx_data_timeout));
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK))
- IWL_DEBUG_POWER(mvm, "DTIM periods to skip = %u\n",
- le32_to_cpu(cmd->skip_dtim_periods));
- if (cmd->flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- IWL_DEBUG_POWER(mvm, "LP RX RSSI threshold = %u\n",
- le32_to_cpu(cmd->lprx_rssi_threshold));
- }
-}
-
-static void iwl_mvm_power_build_cmd(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif,
- struct iwl_powertable_cmd *cmd)
-{
- struct ieee80211_hw *hw = mvm->hw;
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct ieee80211_channel *chan;
- int dtimper, dtimper_msec;
- int keep_alive;
- bool radar_detect = false;
- struct iwl_mvm_vif *mvmvif __maybe_unused =
- iwl_mvm_vif_from_mac80211(vif);
-
- /*
- * Regardless of power management state the driver must set
- * keep alive period. FW will use it for sending keep alive NDPs
- * immediately after association.
- */
- cmd->keep_alive_seconds = POWER_KEEP_ALIVE_PERIOD_SEC;
-
- if (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_CAM)
- return;
-
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
- if (!vif->bss_conf.assoc)
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd->flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
- if (!vif->bss_conf.ps)
- return;
-
- cmd->flags |= cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK);
-
- if (vif->bss_conf.beacon_rate &&
- (vif->bss_conf.beacon_rate->bitrate == 10 ||
- vif->bss_conf.beacon_rate->bitrate == 60)) {
- cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
- cmd->lprx_rssi_threshold =
- cpu_to_le32(POWER_LPRX_RSSI_THRESHOLD);
- }
-
- dtimper = hw->conf.ps_dtim_period ?: 1;
-
- /* Check if radar detection is required on current channel */
- rcu_read_lock();
- chanctx_conf = rcu_dereference(vif->chanctx_conf);
- WARN_ON(!chanctx_conf);
- if (chanctx_conf) {
- chan = chanctx_conf->def.chan;
- radar_detect = chan->flags & IEEE80211_CHAN_RADAR;
- }
- rcu_read_unlock();
-
- /* Check skip over DTIM conditions */
- if (!radar_detect && (dtimper <= 10) &&
- (iwlmvm_mod_params.power_scheme == IWL_POWER_SCHEME_LP ||
- mvm->cur_ucode == IWL_UCODE_WOWLAN)) {
- cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- cmd->skip_dtim_periods = cpu_to_le32(3);
- }
-
- /* Check that keep alive period is at least 3 * DTIM */
- dtimper_msec = dtimper * vif->bss_conf.beacon_int;
- keep_alive = max_t(int, 3 * dtimper_msec,
- MSEC_PER_SEC * cmd->keep_alive_seconds);
- keep_alive = DIV_ROUND_UP(keep_alive, MSEC_PER_SEC);
- cmd->keep_alive_seconds = keep_alive;
-
- if (mvm->cur_ucode != IWL_UCODE_WOWLAN) {
- cmd->rx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
- cmd->tx_data_timeout = cpu_to_le32(100 * USEC_PER_MSEC);
- } else {
- cmd->rx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
- cmd->tx_data_timeout = cpu_to_le32(10 * USEC_PER_MSEC);
- }
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_KEEP_ALIVE)
- cmd->keep_alive_seconds = mvmvif->dbgfs_pm.keep_alive_seconds;
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_OVER_DTIM) {
- if (mvmvif->dbgfs_pm.skip_over_dtim)
- cmd->flags |=
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- else
- cmd->flags &=
- cpu_to_le16(~POWER_FLAGS_SKIP_OVER_DTIM_MSK);
- }
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_RX_DATA_TIMEOUT)
- cmd->rx_data_timeout =
- cpu_to_le32(mvmvif->dbgfs_pm.rx_data_timeout);
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_TX_DATA_TIMEOUT)
- cmd->tx_data_timeout =
- cpu_to_le32(mvmvif->dbgfs_pm.tx_data_timeout);
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_SKIP_DTIM_PERIODS)
- cmd->skip_dtim_periods =
- cpu_to_le32(mvmvif->dbgfs_pm.skip_dtim_periods);
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_ENA) {
- if (mvmvif->dbgfs_pm.lprx_ena)
- cmd->flags |= cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK);
- else
- cmd->flags &= cpu_to_le16(~POWER_FLAGS_LPRX_ENA_MSK);
- }
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_LPRX_RSSI_THRESHOLD)
- cmd->lprx_rssi_threshold =
- cpu_to_le32(mvmvif->dbgfs_pm.lprx_rssi_threshold);
-#endif /* CONFIG_IWLWIFI_DEBUGFS */
-}
-
-static int iwl_mvm_power_legacy_update_mode(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- int ret;
- bool ba_enable;
- struct iwl_powertable_cmd cmd = {};
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return 0;
-
- /*
- * TODO: The following vif_count verification is temporary condition.
- * Avoid power mode update if more than one interface is currently
- * active. Remove this condition when FW will support power management
- * on multiple MACs.
- */
- IWL_DEBUG_POWER(mvm, "Currently %d interfaces active\n",
- mvm->vif_count);
- if (mvm->vif_count > 1)
- return 0;
-
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
- iwl_mvm_power_log(mvm, &cmd);
-
- ret = iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_SYNC,
- sizeof(cmd), &cmd);
- if (ret)
- return ret;
-
- ba_enable = !!(cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK));
-
- return iwl_mvm_update_beacon_abort(mvm, vif, ba_enable);
-}
-
-static int iwl_mvm_power_legacy_disable(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif)
-{
- struct iwl_powertable_cmd cmd = {};
- struct iwl_mvm_vif *mvmvif __maybe_unused =
- iwl_mvm_vif_from_mac80211(vif);
-
- if (vif->type != NL80211_IFTYPE_STATION || vif->p2p)
- return 0;
-
- if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
- cmd.flags |= cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK);
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- if (mvmvif->dbgfs_pm.mask & MVM_DEBUGFS_PM_DISABLE_POWER_OFF &&
- mvmvif->dbgfs_pm.disable_power_off)
- cmd.flags &= cpu_to_le16(~POWER_FLAGS_POWER_SAVE_ENA_MSK);
-#endif
- iwl_mvm_power_log(mvm, &cmd);
-
- return iwl_mvm_send_cmd_pdu(mvm, POWER_TABLE_CMD, CMD_ASYNC,
- sizeof(cmd), &cmd);
-}
-
-#ifdef CONFIG_IWLWIFI_DEBUGFS
-static int iwl_mvm_power_legacy_dbgfs_read(struct iwl_mvm *mvm,
- struct ieee80211_vif *vif, char *buf,
- int bufsz)
-{
- struct iwl_powertable_cmd cmd = {};
- int pos = 0;
-
- iwl_mvm_power_build_cmd(mvm, vif, &cmd);
-
- pos += scnprintf(buf+pos, bufsz-pos, "disable_power_off = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_POWER_SAVE_ENA_MSK)) ?
- 0 : 1);
- pos += scnprintf(buf+pos, bufsz-pos, "skip_dtim_periods = %d\n",
- le32_to_cpu(cmd.skip_dtim_periods));
- pos += scnprintf(buf+pos, bufsz-pos, "power_scheme = %d\n",
- iwlmvm_mod_params.power_scheme);
- pos += scnprintf(buf+pos, bufsz-pos, "flags = 0x%x\n",
- le16_to_cpu(cmd.flags));
- pos += scnprintf(buf+pos, bufsz-pos, "keep_alive = %d\n",
- cmd.keep_alive_seconds);
-
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK)) {
- pos += scnprintf(buf+pos, bufsz-pos, "skip_over_dtim = %d\n",
- (cmd.flags &
- cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK)) ?
- 1 : 0);
- pos += scnprintf(buf+pos, bufsz-pos, "rx_data_timeout = %d\n",
- le32_to_cpu(cmd.rx_data_timeout));
- pos += scnprintf(buf+pos, bufsz-pos, "tx_data_timeout = %d\n",
- le32_to_cpu(cmd.tx_data_timeout));
- if (cmd.flags & cpu_to_le16(POWER_FLAGS_LPRX_ENA_MSK))
- pos += scnprintf(buf+pos, bufsz-pos,
- "lprx_rssi_threshold = %d\n",
- le32_to_cpu(cmd.lprx_rssi_threshold));
- }
- return pos;
-}
-#endif
-
-const struct iwl_mvm_power_ops pm_legacy_ops = {
- .power_update_mode = iwl_mvm_power_legacy_update_mode,
- .power_disable = iwl_mvm_power_legacy_disable,
-#ifdef CONFIG_IWLWIFI_DEBUGFS
- .power_dbgfs_read = iwl_mvm_power_legacy_dbgfs_read,
-#endif
-};
diff --git a/drivers/net/wireless/iwlwifi/mvm/quota.c b/drivers/net/wireless/iwlwifi/mvm/quota.c
index ce5db6c..06d8429 100644
--- a/drivers/net/wireless/iwlwifi/mvm/quota.c
+++ b/drivers/net/wireless/iwlwifi/mvm/quota.c
@@ -65,9 +65,14 @@
#include "fw-api.h"
#include "mvm.h"
+#define QUOTA_100 IWL_MVM_MAX_QUOTA
+#define QUOTA_LOWLAT_MIN ((QUOTA_100 * IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT) / 100)
+
struct iwl_mvm_quota_iterator_data {
int n_interfaces[MAX_BINDINGS];
int colors[MAX_BINDINGS];
+ int low_latency[MAX_BINDINGS];
+ int n_low_latency_bindings;
struct ieee80211_vif *new_vif;
};
@@ -107,22 +112,29 @@ static void iwl_mvm_quota_iterator(void *_data, u8 *mac,
switch (vif->type) {
case NL80211_IFTYPE_STATION:
if (vif->bss_conf.assoc)
- data->n_interfaces[id]++;
- break;
+ break;
+ return;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
if (mvmvif->ap_ibss_active)
- data->n_interfaces[id]++;
- break;
+ break;
+ return;
case NL80211_IFTYPE_MONITOR:
if (mvmvif->monitor_active)
- data->n_interfaces[id]++;
- break;
+ break;
+ return;
case NL80211_IFTYPE_P2P_DEVICE:
- break;
+ return;
default:
WARN_ON_ONCE(1);
- break;
+ return;
+ }
+
+ data->n_interfaces[id]++;
+
+ if (iwl_mvm_vif_low_latency(mvmvif) && !data->low_latency[id]) {
+ data->n_low_latency_bindings++;
+ data->low_latency[id] = true;
}
}
@@ -162,12 +174,13 @@ static void iwl_mvm_adjust_quota_for_noa(struct iwl_mvm *mvm,
int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
{
struct iwl_time_quota_cmd cmd = {};
- int i, idx, ret, num_active_macs, quota, quota_rem;
+ int i, idx, ret, num_active_macs, quota, quota_rem, n_non_lowlat;
struct iwl_mvm_quota_iterator_data data = {
.n_interfaces = {},
.colors = { -1, -1, -1, -1 },
.new_vif = newvif,
};
+ u32 ll_max_duration;
lockdep_assert_held(&mvm->mutex);
@@ -186,6 +199,21 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
iwl_mvm_quota_iterator(&data, newvif->addr, newvif);
}
+ switch (data.n_low_latency_bindings) {
+ case 0: /* no low latency - use default */
+ ll_max_duration = 0;
+ break;
+ case 1: /* SingleBindingLowLatencyMode */
+ ll_max_duration = IWL_MVM_LOWLAT_SINGLE_BINDING_MAXDUR;
+ break;
+ case 2: /* DualBindingLowLatencyMode */
+ ll_max_duration = IWL_MVM_LOWLAT_DUAL_BINDING_MAXDUR;
+ break;
+ default: /* MultiBindingLowLatencyMode */
+ ll_max_duration = 0;
+ break;
+ }
+
/*
* The FW's scheduling session consists of
* IWL_MVM_MAX_QUOTA fragments. Divide these fragments
@@ -197,11 +225,39 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
num_active_macs += data.n_interfaces[i];
}
- quota = 0;
- quota_rem = 0;
- if (num_active_macs) {
- quota = IWL_MVM_MAX_QUOTA / num_active_macs;
- quota_rem = IWL_MVM_MAX_QUOTA % num_active_macs;
+ n_non_lowlat = num_active_macs;
+
+ if (data.n_low_latency_bindings == 1) {
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ if (data.low_latency[i]) {
+ n_non_lowlat -= data.n_interfaces[i];
+ break;
+ }
+ }
+ }
+
+ if (data.n_low_latency_bindings == 1 && n_non_lowlat) {
+ /*
+ * Reserve quota for the low latency binding in case
+ * there are several data bindings but only a single
+ * low latency one. Split the rest of the quota equally
+ * between the other data interfaces.
+ */
+ quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
+ quota_rem = QUOTA_100 - n_non_lowlat * quota -
+ QUOTA_LOWLAT_MIN;
+ } else if (num_active_macs) {
+ /*
+ * There are 0 or more than 1 low latency bindings, or all the
+ * data interfaces belong to the single low latency binding.
+ * Split the quota equally between the data interfaces.
+ */
+ quota = QUOTA_100 / num_active_macs;
+ quota_rem = QUOTA_100 % num_active_macs;
+ } else {
+ /* values don't really matter - won't be used */
+ quota = 0;
+ quota_rem = 0;
}
for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
@@ -211,19 +267,42 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
cmd.quotas[idx].id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(i, data.colors[i]));
- if (data.n_interfaces[i] <= 0) {
+ if (data.n_interfaces[i] <= 0)
cmd.quotas[idx].quota = cpu_to_le32(0);
- cmd.quotas[idx].max_duration = cpu_to_le32(0);
- } else {
+ else if (data.n_low_latency_bindings == 1 && n_non_lowlat &&
+ data.low_latency[i])
+ /*
+ * There is more than one binding, but only one of the
+ * bindings is in low latency. In this case, allocate
+ * the minimal required quota for the low latency
+ * binding.
+ */
+ cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
+
+ else
cmd.quotas[idx].quota =
cpu_to_le32(quota * data.n_interfaces[i]);
+
+ WARN_ONCE(le32_to_cpu(cmd.quotas[idx].quota) > QUOTA_100,
+ "Binding=%d, quota=%u > max=%u\n",
+ idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100);
+
+ if (data.n_interfaces[i] && !data.low_latency[i])
+ cmd.quotas[idx].max_duration =
+ cpu_to_le32(ll_max_duration);
+ else
cmd.quotas[idx].max_duration = cpu_to_le32(0);
- }
+
idx++;
}
- /* Give the remainder of the session to the first binding */
- le32_add_cpu(&cmd.quotas[0].quota, quota_rem);
+ /* Give the remainder of the session to the first data binding */
+ for (i = 0; i < MAX_BINDINGS; i++) {
+ if (le32_to_cpu(cmd.quotas[i].quota) != 0) {
+ le32_add_cpu(&cmd.quotas[i].quota, quota_rem);
+ break;
+ }
+ }
iwl_mvm_adjust_quota_for_noa(mvm, &cmd);
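The reworked quota split above reserves a minimum share for a lone low-latency binding and divides the remaining fragments among the other data interfaces. The sketch below walks through that arithmetic; QUOTA_100 = 128 and a 50% low-latency floor are assumptions chosen only to keep the numbers readable, not the driver's real constants.

#include <stdio.h>

#define QUOTA_100        128    /* assumed total schedule fragments */
#define QUOTA_LOWLAT_MIN 64     /* assumed floor for a low-latency binding */

int main(void)
{
        /* one low-latency binding with 1 iface, one normal binding with 2 */
        int n_interfaces[2] = { 1, 2 };
        int low_latency[2] = { 1, 0 };
        int n_low_latency_bindings = 1;
        int num_active_macs = 3, n_non_lowlat = 2;
        int quota, quota_rem, i;

        if (n_low_latency_bindings == 1 && n_non_lowlat) {
                /* reserve the floor, split the rest among normal interfaces */
                quota = (QUOTA_100 - QUOTA_LOWLAT_MIN) / n_non_lowlat;
                quota_rem = QUOTA_100 - n_non_lowlat * quota - QUOTA_LOWLAT_MIN;
        } else {
                /* no (or more than one) low-latency binding: plain even split */
                quota = QUOTA_100 / num_active_macs;
                quota_rem = QUOTA_100 % num_active_macs;
        }

        for (i = 0; i < 2; i++) {
                int q = low_latency[i] ? QUOTA_LOWLAT_MIN
                                       : quota * n_interfaces[i];
                printf("binding %d: quota %d\n", i, q);
        }
        printf("remainder to first data binding: %d\n", quota_rem);
        return 0;
}

With these assumed constants the low-latency binding keeps 64 fragments while the two normal interfaces share the remaining 64 (32 each), matching the reservation logic in iwl_mvm_update_quotas().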
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.c b/drivers/net/wireless/iwlwifi/mvm/rs.c
index 6abf74e..32bb807 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.c
@@ -380,49 +380,49 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search);
* (2.4 GHz) band.
*/
-static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
+static const u16 expected_tpt_legacy[IWL_RATE_COUNT] = {
7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0, 0, 0
};
/* Expected TpT tables. 4 indexes:
* 0 - NGI, 1 - SGI, 2 - AGG+NGI, 3 - AGG+SGI
*/
-static s32 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 42, 0, 76, 102, 124, 159, 183, 193, 202, 216, 0},
{0, 0, 0, 0, 46, 0, 82, 110, 132, 168, 192, 202, 210, 225, 0},
{0, 0, 0, 0, 49, 0, 97, 145, 192, 285, 375, 420, 464, 551, 0},
{0, 0, 0, 0, 54, 0, 108, 160, 213, 315, 415, 465, 513, 608, 0},
};
-static s32 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257, 269, 275},
{0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264, 275, 280},
{0, 0, 0, 0, 101, 0, 199, 295, 389, 570, 744, 828, 911, 1070, 1173},
{0, 0, 0, 0, 112, 0, 220, 326, 429, 629, 819, 912, 1000, 1173, 1284},
};
-static s32 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_siso_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 130, 0, 191, 223, 244, 273, 288, 294, 298, 305, 308},
{0, 0, 0, 0, 138, 0, 200, 231, 251, 279, 293, 298, 302, 308, 312},
{0, 0, 0, 0, 217, 0, 429, 634, 834, 1220, 1585, 1760, 1931, 2258, 2466},
{0, 0, 0, 0, 241, 0, 475, 701, 921, 1343, 1741, 1931, 2117, 2468, 2691},
};
-static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250, 261, 0},
{0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256, 267, 0},
{0, 0, 0, 0, 98, 0, 193, 286, 375, 550, 718, 799, 878, 1032, 0},
{0, 0, 0, 0, 109, 0, 214, 316, 414, 607, 790, 879, 965, 1132, 0},
};
-static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289, 296, 300},
{0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293, 300, 303},
{0, 0, 0, 0, 200, 0, 390, 571, 741, 1067, 1365, 1505, 1640, 1894, 2053},
{0, 0, 0, 0, 221, 0, 430, 630, 816, 1169, 1490, 1641, 1784, 2053, 2221},
};
-static s32 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
+static const u16 expected_tpt_mimo2_80MHz[4][IWL_RATE_COUNT] = {
{0, 0, 0, 0, 182, 0, 240, 264, 278, 299, 308, 311, 313, 317, 319},
{0, 0, 0, 0, 190, 0, 247, 269, 282, 302, 310, 313, 315, 319, 320},
{0, 0, 0, 0, 428, 0, 833, 1215, 1577, 2254, 2863, 3147, 3418, 3913, 4219},
@@ -905,7 +905,7 @@ static void rs_get_lower_rate_down_column(struct iwl_lq_sta *lq_sta,
rate->bw = RATE_MCS_CHAN_WIDTH_20;
- WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX &&
+ WARN_ON_ONCE(rate->index < IWL_RATE_MCS_0_INDEX ||
rate->index > IWL_RATE_MCS_9_INDEX);
rate->index = rs_ht_to_legacy[rate->index];
@@ -1169,12 +1169,12 @@ static void rs_set_stay_in_table(struct iwl_mvm *mvm, u8 is_legacy,
lq_sta->visited_columns = 0;
}
-static s32 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
+static const u16 *rs_get_expected_tpt_table(struct iwl_lq_sta *lq_sta,
const struct rs_tx_column *column,
u32 bw)
{
/* Used to choose among HT tables */
- s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
+ const u16 (*ht_tbl_pointer)[IWL_RATE_COUNT];
if (WARN_ON_ONCE(column->mode != RS_LEGACY &&
column->mode != RS_SISO &&
@@ -1262,9 +1262,8 @@ static s32 rs_get_best_rate(struct iwl_mvm *mvm,
&(lq_sta->lq_info[lq_sta->active_tbl]);
s32 active_sr = active_tbl->win[index].success_ratio;
s32 active_tpt = active_tbl->expected_tpt[index];
-
/* expected "search" throughput */
- s32 *tpt_tbl = tbl->expected_tpt;
+ const u16 *tpt_tbl = tbl->expected_tpt;
s32 new_rate, high, low, start_hi;
u16 high_low;
@@ -1479,7 +1478,7 @@ static enum rs_column rs_get_next_column(struct iwl_mvm *mvm,
const struct rs_tx_column *next_col;
allow_column_func_t allow_func;
u8 valid_ants = iwl_fw_valid_tx_ant(mvm->fw);
- s32 *expected_tpt_tbl;
+ const u16 *expected_tpt_tbl;
s32 tpt, max_expected_tpt;
for (i = 0; i < MAX_NEXT_COLUMNS; i++) {
@@ -2815,8 +2814,8 @@ static void rs_rate_init_stub(void *mvm_r,
struct ieee80211_sta *sta, void *mvm_sta)
{
}
-static struct rate_control_ops rs_mvm_ops = {
- .module = NULL,
+
+static const struct rate_control_ops rs_mvm_ops = {
.name = RS_NAME,
.tx_status = rs_tx_status,
.get_rate = rs_get_rate,
diff --git a/drivers/net/wireless/iwlwifi/mvm/rs.h b/drivers/net/wireless/iwlwifi/mvm/rs.h
index 7bc6404..3332b39 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rs.h
+++ b/drivers/net/wireless/iwlwifi/mvm/rs.h
@@ -277,7 +277,7 @@ enum rs_column {
struct iwl_scale_tbl_info {
struct rs_rate rate;
enum rs_column column;
- s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
+ const u16 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
};
diff --git a/drivers/net/wireless/iwlwifi/mvm/rx.c b/drivers/net/wireless/iwlwifi/mvm/rx.c
index a85b60f..fa3c139 100644
--- a/drivers/net/wireless/iwlwifi/mvm/rx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/rx.c
@@ -129,22 +129,16 @@ static void iwl_mvm_calc_rssi(struct iwl_mvm *mvm,
struct ieee80211_rx_status *rx_status)
{
int rssi_a, rssi_b, rssi_a_dbm, rssi_b_dbm, max_rssi_dbm;
- int rssi_all_band_a, rssi_all_band_b;
- u32 agc_a, agc_b, max_agc;
+ u32 agc_a, agc_b;
u32 val;
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_AGC_IDX]);
agc_a = (val & IWL_OFDM_AGC_A_MSK) >> IWL_OFDM_AGC_A_POS;
agc_b = (val & IWL_OFDM_AGC_B_MSK) >> IWL_OFDM_AGC_B_POS;
- max_agc = max_t(u32, agc_a, agc_b);
val = le32_to_cpu(phy_info->non_cfg_phy[IWL_RX_INFO_RSSI_AB_IDX]);
rssi_a = (val & IWL_OFDM_RSSI_INBAND_A_MSK) >> IWL_OFDM_RSSI_A_POS;
rssi_b = (val & IWL_OFDM_RSSI_INBAND_B_MSK) >> IWL_OFDM_RSSI_B_POS;
- rssi_all_band_a = (val & IWL_OFDM_RSSI_ALLBAND_A_MSK) >>
- IWL_OFDM_RSSI_ALLBAND_A_POS;
- rssi_all_band_b = (val & IWL_OFDM_RSSI_ALLBAND_B_MSK) >>
- IWL_OFDM_RSSI_ALLBAND_B_POS;
/*
* dBm = rssi dB - agc dB - constant.
@@ -364,10 +358,10 @@ int iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
rx_status.flag |= RX_FLAG_40MHZ;
break;
case RATE_MCS_CHAN_WIDTH_80:
- rx_status.flag |= RX_FLAG_80MHZ;
+ rx_status.vht_flag |= RX_VHT_FLAG_80MHZ;
break;
case RATE_MCS_CHAN_WIDTH_160:
- rx_status.flag |= RX_FLAG_160MHZ;
+ rx_status.vht_flag |= RX_VHT_FLAG_160MHZ;
break;
}
if (rate_n_flags & RATE_MCS_SGI_MSK)
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 742afc4..eba55cc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -408,6 +408,8 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+
return 0;
}
@@ -476,6 +478,7 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
if (iwl_mvm_is_radio_killed(mvm)) {
ieee80211_scan_completed(mvm->hw, true);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
mvm->scan_status = IWL_MVM_SCAN_NONE;
return;
}
@@ -488,7 +491,7 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
ret = iwl_mvm_send_cmd_pdu(mvm, SCAN_ABORT_CMD, CMD_SYNC, 0, NULL);
if (ret) {
IWL_ERR(mvm, "Couldn't send SCAN_ABORT_CMD: %d\n", ret);
- /* mac80211's state will be cleaned in the fw_restart flow */
+ /* mac80211's state will be cleaned in the nic_restart flow */
goto out_remove_notif;
}
@@ -509,11 +512,16 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scan_offload_complete *scan_notif = (void *)pkt->data;
+ /* scan status must be locked for proper checking */
+ lockdep_assert_held(&mvm->mutex);
+
IWL_DEBUG_SCAN(mvm, "Scheduled scan completed, status %s\n",
scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted");
- mvm->scan_status = IWL_MVM_SCAN_NONE;
+ /* might already be something else again, don't reset if so */
+ if (mvm->scan_status == IWL_MVM_SCAN_SCHED)
+ mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_sched_scan_stopped(mvm->hw);
return 0;
@@ -596,6 +604,9 @@ static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req,
* config match list.
*/
for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) {
+ /* skip empty SSID matchsets */
+ if (!req->match_sets[i].ssid.ssid_len)
+ continue;
scan->direct_scan[i].id = WLAN_EID_SSID;
scan->direct_scan[i].len = req->match_sets[i].ssid.ssid_len;
memcpy(scan->direct_scan[i].ssid, req->match_sets[i].ssid.ssid,
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index 3397f59..2677d1c 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -66,27 +66,27 @@
#include "sta.h"
#include "rs.h"
-static void iwl_mvm_add_sta_cmd_v6_to_v5(struct iwl_mvm_add_sta_cmd_v6 *cmd_v6,
+static void iwl_mvm_add_sta_cmd_v7_to_v5(struct iwl_mvm_add_sta_cmd_v7 *cmd_v7,
struct iwl_mvm_add_sta_cmd_v5 *cmd_v5)
{
memset(cmd_v5, 0, sizeof(*cmd_v5));
- cmd_v5->add_modify = cmd_v6->add_modify;
- cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
- cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
- memcpy(cmd_v5->addr, cmd_v6->addr, ETH_ALEN);
- cmd_v5->sta_id = cmd_v6->sta_id;
- cmd_v5->modify_mask = cmd_v6->modify_mask;
- cmd_v5->station_flags = cmd_v6->station_flags;
- cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
- cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
- cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
- cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
- cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
- cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
- cmd_v5->assoc_id = cmd_v6->assoc_id;
- cmd_v5->beamform_flags = cmd_v6->beamform_flags;
- cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
+ cmd_v5->add_modify = cmd_v7->add_modify;
+ cmd_v5->tid_disable_tx = cmd_v7->tid_disable_tx;
+ cmd_v5->mac_id_n_color = cmd_v7->mac_id_n_color;
+ memcpy(cmd_v5->addr, cmd_v7->addr, ETH_ALEN);
+ cmd_v5->sta_id = cmd_v7->sta_id;
+ cmd_v5->modify_mask = cmd_v7->modify_mask;
+ cmd_v5->station_flags = cmd_v7->station_flags;
+ cmd_v5->station_flags_msk = cmd_v7->station_flags_msk;
+ cmd_v5->add_immediate_ba_tid = cmd_v7->add_immediate_ba_tid;
+ cmd_v5->remove_immediate_ba_tid = cmd_v7->remove_immediate_ba_tid;
+ cmd_v5->add_immediate_ba_ssn = cmd_v7->add_immediate_ba_ssn;
+ cmd_v5->sleep_tx_count = cmd_v7->sleep_tx_count;
+ cmd_v5->sleep_state_flags = cmd_v7->sleep_state_flags;
+ cmd_v5->assoc_id = cmd_v7->assoc_id;
+ cmd_v5->beamform_flags = cmd_v7->beamform_flags;
+ cmd_v5->tfd_queue_msk = cmd_v7->tfd_queue_msk;
}
static void
@@ -110,7 +110,7 @@ iwl_mvm_add_sta_key_to_add_sta_cmd_v5(struct iwl_mvm_add_sta_key_cmd *key_cmd,
}
static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
- struct iwl_mvm_add_sta_cmd_v6 *cmd,
+ struct iwl_mvm_add_sta_cmd_v7 *cmd,
int *status)
{
struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
@@ -119,14 +119,14 @@ static int iwl_mvm_send_add_sta_cmd_status(struct iwl_mvm *mvm,
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(*cmd),
cmd, status);
- iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+ iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, sizeof(cmd_v5),
&cmd_v5, status);
}
static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
- struct iwl_mvm_add_sta_cmd_v6 *cmd)
+ struct iwl_mvm_add_sta_cmd_v7 *cmd)
{
struct iwl_mvm_add_sta_cmd_v5 cmd_v5;
@@ -134,7 +134,7 @@ static int iwl_mvm_send_add_sta_cmd(struct iwl_mvm *mvm, u32 flags,
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags,
sizeof(*cmd), cmd);
- iwl_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
+ iwl_mvm_add_sta_cmd_v7_to_v5(cmd, &cmd_v5);
return iwl_mvm_send_cmd_pdu(mvm, ADD_STA, flags, sizeof(cmd_v5),
&cmd_v5);
@@ -175,19 +175,30 @@ static int iwl_mvm_send_add_sta_key_cmd(struct iwl_mvm *mvm,
&sta_cmd);
}
-static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm)
+static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
+ enum nl80211_iftype iftype)
{
int sta_id;
+ u32 reserved_ids = 0;
+ BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));
lockdep_assert_held(&mvm->mutex);
+ /* d0i3/d3 assumes the AP's sta_id (of the station vif) is 0, so reserve it. */
+ if (iftype != NL80211_IFTYPE_STATION)
+ reserved_ids = BIT(0);
+
/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
- for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++)
+ for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) {
+ if (BIT(sta_id) & reserved_ids)
+ continue;
+
if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
lockdep_is_held(&mvm->mutex)))
return sta_id;
+ }
return IWL_MVM_STATION_COUNT;
}
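
(Illustration, not part of the patch: the reserved-ID scan added to iwl_mvm_find_free_sta_id() above amounts to the following standalone userspace C sketch -- walk the station table and skip any index whose bit is set in a reserved mask. All names and sizes here are made up for the example.)

#include <stdbool.h>
#include <stdio.h>

#define STATION_COUNT 16

static int find_free_sta_id(const bool used[STATION_COUNT],
			    unsigned int reserved_ids)
{
	int sta_id;

	for (sta_id = 0; sta_id < STATION_COUNT; sta_id++) {
		if (reserved_ids & (1u << sta_id))
			continue;
		if (!used[sta_id])
			return sta_id;
	}
	return STATION_COUNT;	/* table full */
}

int main(void)
{
	bool used[STATION_COUNT] = { false };

	/* a non-station interface must not take sta_id 0 */
	printf("free id: %d\n", find_free_sta_id(used, 1u << 0));
	return 0;
}
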
@@ -196,7 +207,7 @@ int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
bool update)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v6 add_sta_cmd;
+ struct iwl_mvm_add_sta_cmd_v7 add_sta_cmd;
int ret;
u32 status;
u32 agg_size = 0, mpdu_dens = 0;
@@ -312,7 +323,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm,
lockdep_assert_held(&mvm->mutex);
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
- sta_id = iwl_mvm_find_free_sta_id(mvm);
+ sta_id = iwl_mvm_find_free_sta_id(mvm,
+ ieee80211_vif_type_p2p(vif));
else
sta_id = mvm_sta->sta_id;
@@ -368,7 +380,7 @@ int iwl_mvm_update_sta(struct iwl_mvm *mvm,
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain)
{
- struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@@ -522,6 +534,10 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
/* unassoc - go ahead - remove the AP STA now */
mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT;
+
+ /* clear d0i3_ap_sta_id if no longer relevant */
+ if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id)
+ mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
}
/*
@@ -560,10 +576,10 @@ int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
}
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
- u32 qmask)
+ u32 qmask, enum nl80211_iftype iftype)
{
if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
- sta->sta_id = iwl_mvm_find_free_sta_id(mvm);
+ sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT))
return -ENOSPC;
}
@@ -587,13 +603,13 @@ static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
const u8 *addr,
u16 mac_id, u16 color)
{
- struct iwl_mvm_add_sta_cmd_v6 cmd;
+ struct iwl_mvm_add_sta_cmd_v7 cmd;
int ret;
u32 status;
lockdep_assert_held(&mvm->mutex);
- memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v6));
+ memset(&cmd, 0, sizeof(struct iwl_mvm_add_sta_cmd_v7));
cmd.sta_id = sta->sta_id;
cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
color));
@@ -627,7 +643,8 @@ int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
lockdep_assert_held(&mvm->mutex);
/* Add the aux station, but without any queues */
- ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0);
+ ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, 0,
+ NL80211_IFTYPE_UNSPECIFIED);
if (ret)
return ret;
@@ -699,7 +716,8 @@ int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
qmask = iwl_mvm_mac_get_queues_mask(mvm, vif);
- ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask);
+ ret = iwl_mvm_allocate_int_sta(mvm, bsta, qmask,
+ ieee80211_vif_type_p2p(vif));
if (ret)
return ret;
@@ -735,7 +753,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@@ -794,7 +812,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u8 queue, bool start)
{
struct iwl_mvm_sta *mvm_sta = (void *)sta->drv_priv;
- struct iwl_mvm_add_sta_cmd_v6 cmd = {};
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {};
int ret;
u32 status;
@@ -833,7 +851,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
-static const u8 tid_to_ac[] = {
+static const u8 tid_to_mac80211_ac[] = {
IEEE80211_AC_BE,
IEEE80211_AC_BK,
IEEE80211_AC_BK,
@@ -844,6 +862,17 @@ static const u8 tid_to_ac[] = {
IEEE80211_AC_VO,
};
+static const u8 tid_to_ucode_ac[] = {
+ AC_BE,
+ AC_BK,
+ AC_BK,
+ AC_BE,
+ AC_VI,
+ AC_VI,
+ AC_VO,
+ AC_VO,
+};
+
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
@@ -874,7 +903,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
/* the new tx queue is still connected to the same mac80211 queue */
- mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_ac[tid]];
+ mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
spin_lock_bh(&mvmsta->lock);
tid_data = &mvmsta->tid_data[tid];
@@ -916,7 +945,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
tid_data->ssn = 0xffff;
spin_unlock_bh(&mvmsta->lock);
- fifo = iwl_mvm_ac_to_tx_fifo[tid_to_ac[tid]];
+ fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
if (ret)
@@ -1411,7 +1440,7 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v6 cmd = {
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.station_flags_msk = cpu_to_le32(STA_FLG_PS),
@@ -1427,28 +1456,102 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
- u16 cnt)
+ u16 cnt, u16 tids, bool more_data,
+ bool agg)
{
- u16 sleep_state_flags =
- (reason == IEEE80211_FRAME_RELEASE_UAPSD) ?
- STA_SLEEP_STATE_UAPSD : STA_SLEEP_STATE_PS_POLL;
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
- struct iwl_mvm_add_sta_cmd_v6 cmd = {
+ struct iwl_mvm_add_sta_cmd_v7 cmd = {
.add_modify = STA_MODE_MODIFY,
.sta_id = mvmsta->sta_id,
.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
.sleep_tx_count = cpu_to_le16(cnt),
.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
- /*
- * Same modify mask for sleep_tx_count and sleep_state_flags so
- * we must set the sleep_state_flags too.
- */
- .sleep_state_flags = cpu_to_le16(sleep_state_flags),
};
- int ret;
+ int tid, ret;
+ unsigned long _tids = tids;
+
+ /* convert TIDs to ACs - we don't support TSPEC so that's OK
+ * Note that this field is reserved and unused by firmware not
+ * supporting GO uAPSD, so it's safe to always do this.
+ */
+ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
+ cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
+
+ /* If we're releasing frames from aggregation queues then check if all
+ * the queues combined that we're releasing frames from have
+ * - more frames than the service period, in which case more_data
+ * needs to be set
+ * - fewer than 'cnt' frames, in which case we need to adjust the
+ * firmware command (but do that unconditionally)
+ */
+ if (agg) {
+ int remaining = cnt;
+
+ spin_lock_bh(&mvmsta->lock);
+ for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
+ struct iwl_mvm_tid_data *tid_data;
+ u16 n_queued;
+
+ tid_data = &mvmsta->tid_data[tid];
+ if (WARN(tid_data->state != IWL_AGG_ON &&
+ tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
+ "TID %d state is %d\n",
+ tid, tid_data->state)) {
+ spin_unlock_bh(&mvmsta->lock);
+ ieee80211_sta_eosp(sta);
+ return;
+ }
+
+ n_queued = iwl_mvm_tid_queued(tid_data);
+ if (n_queued > remaining) {
+ more_data = true;
+ remaining = 0;
+ break;
+ }
+ remaining -= n_queued;
+ }
+ spin_unlock_bh(&mvmsta->lock);
+
+ cmd.sleep_tx_count = cpu_to_le16(cnt - remaining);
+ if (WARN_ON(cnt - remaining == 0)) {
+ ieee80211_sta_eosp(sta);
+ return;
+ }
+ }
+
+ /* Note: this is ignored by firmware not supporting GO uAPSD */
+ if (more_data)
+ cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);
+
+ if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
+ mvmsta->next_status_eosp = true;
+ cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
+ } else {
+ cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
+ }
- /* TODO: somehow the fw doesn't seem to take PS_POLL into account */
ret = iwl_mvm_send_add_sta_cmd(mvm, CMD_ASYNC, &cmd);
if (ret)
IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}
+
+int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd)
+{
+ struct iwl_rx_packet *pkt = rxb_addr(rxb);
+ struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
+ struct ieee80211_sta *sta;
+ u32 sta_id = le32_to_cpu(notif->sta_id);
+
+ if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
+ return 0;
+
+ rcu_read_lock();
+ sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ if (!IS_ERR_OR_NULL(sta))
+ ieee80211_sta_eosp(sta);
+ rcu_read_unlock();
+
+ return 0;
+}
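
(Illustration, not part of the patch: the frame-accounting comment in iwl_mvm_sta_modify_sleep_tx_count() above boils down to the following standalone userspace C sketch -- given per-TID queue depths and a requested release count, compute the adjusted count and whether more_data must be set. Names and numbers are made up for the example.)

#include <stdbool.h>
#include <stdio.h>

#define MAX_TID 8

static unsigned int adjust_release(const unsigned int queued[MAX_TID],
				   unsigned int tids, unsigned int cnt,
				   bool *more_data)
{
	unsigned int remaining = cnt;
	unsigned int tid;

	for (tid = 0; tid < MAX_TID; tid++) {
		if (!(tids & (1u << tid)))
			continue;
		if (queued[tid] > remaining) {
			/* more queued than the service period can release */
			*more_data = true;
			remaining = 0;
			break;
		}
		remaining -= queued[tid];
	}

	/* tell the firmware to release only what is actually queued */
	return cnt - remaining;
}

int main(void)
{
	unsigned int queued[MAX_TID] = { 0, 0, 0, 0, 0, 3, 0, 1 };
	bool more_data = false;
	unsigned int n = adjust_release(queued, (1u << 5) | (1u << 7), 6,
					&more_data);

	/* prints: release 4 frame(s), more_data=0 */
	printf("release %u frame(s), more_data=%d\n", n, more_data);
	return 0;
}
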
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.h b/drivers/net/wireless/iwlwifi/mvm/sta.h
index 4968d02..2ed84c4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.h
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.h
@@ -195,24 +195,33 @@ struct iwl_mvm;
/**
* DOC: AP mode - PS
*
- * When a station is asleep, the fw will set it as "asleep". All the
- * non-aggregation frames to that station will be dropped by the fw
- * (%TX_STATUS_FAIL_DEST_PS failure code).
+ * When a station is asleep, the fw will set it as "asleep". All frames on
+ * shared queues (i.e. non-aggregation queues) to that station will be dropped
+ * by the fw (%TX_STATUS_FAIL_DEST_PS failure code).
+ *
* AMPDUs are in a separate queue that is stopped by the fw. We just need to
- * let mac80211 know how many frames we have in these queues so that it can
+ * let mac80211 know when there are frames in these queues so that it can
* properly handle trigger frames.
- * When the a trigger frame is received, mac80211 tells the driver to send
- * frames from the AMPDU queues or AC queue depending on which queue are
- * delivery-enabled and what TID has frames to transmit (Note that mac80211 has
- * all the knowledege since all the non-agg frames are buffered / filtered, and
- * the driver tells mac80211 about agg frames). The driver needs to tell the fw
- * to let frames out even if the station is asleep. This is done by
- * %iwl_mvm_sta_modify_sleep_tx_count.
- * When we receive a frame from that station with PM bit unset, the
- * driver needs to let the fw know that this station isn't alseep any more.
- * This is done by %iwl_mvm_sta_modify_ps_wake.
- *
- * TODO - EOSP handling
+ *
+ * When a trigger frame is received, mac80211 tells the driver to send frames
+ * from the AMPDU queues or sends frames to non-aggregation queues itself,
+ * depending on which ACs are delivery-enabled and what TID has frames to
+ * transmit. Note that mac80211 has all the knowledge since all the non-agg
+ * frames are buffered / filtered, and the driver tells mac80211 about agg
+ * frames. The driver needs to tell the fw to let frames out even if the
+ * station is asleep. This is done by %iwl_mvm_sta_modify_sleep_tx_count.
+ *
+ * When we receive a frame from that station with PM bit unset, the driver
+ * needs to let the fw know that this station isn't asleep any more. This is
+ * done by %iwl_mvm_sta_modify_ps_wake in response to mac80211 signalling the
+ * station's wakeup.
+ *
+ * For a GO, the Service Period might be cut short due to an absence period
+ * of the GO. In this case (and all others) the firmware notifies us with the
+ * EOSP_NOTIFICATION, and we notify mac80211 of that. Further frames that we
+ * already sent to the device will be rejected again.
+ *
+ * See also "AP support for powersaving clients" in mac80211.h.
*/
/**
@@ -261,6 +270,12 @@ struct iwl_mvm_tid_data {
u16 ssn;
};
+static inline u16 iwl_mvm_tid_queued(struct iwl_mvm_tid_data *tid_data)
+{
+ return ieee80211_sn_sub(IEEE80211_SEQ_TO_SN(tid_data->seq_number),
+ tid_data->next_reclaimed);
+}
+
/**
* struct iwl_mvm_sta - representation of a station in the driver
* @sta_id: the index of the station in the fw (will be replaced by id_n_color)
@@ -269,7 +284,11 @@ struct iwl_mvm_tid_data {
* @tid_disable_agg: bitmap: if bit(tid) is set, the fw won't send ampdus for
* tid.
* @max_agg_bufsize: the maximal size of the AGG buffer for this station
+ * @bt_reduced_txpower_dbg: debug mode in which %bt_reduced_txpower is forced
+ * by debugfs.
* @bt_reduced_txpower: is reduced tx power enabled for this station
+ * @next_status_eosp: the next reclaimed packet is a PS-Poll response and
+ * we need to signal the EOSP
* @lock: lock to protect the whole struct. Since %tid_data is access from Tx
* and from Tx response flow, it needs a spinlock.
* @tid_data: per tid data. Look at %iwl_mvm_tid_data.
@@ -287,7 +306,9 @@ struct iwl_mvm_sta {
u32 mac_id_n_color;
u16 tid_disable_agg;
u8 max_agg_bufsize;
+ bool bt_reduced_txpower_dbg;
bool bt_reduced_txpower;
+ bool next_status_eosp;
spinlock_t lock;
struct iwl_mvm_tid_data tid_data[IWL_MAX_TID_COUNT];
struct iwl_lq_sta lq_sta;
@@ -345,6 +366,10 @@ void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
struct ieee80211_sta *sta, u32 iv32,
u16 *phase1key);
+int iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
+ struct iwl_rx_cmd_buffer *rxb,
+ struct iwl_device_cmd *cmd);
+
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start);
@@ -359,7 +384,7 @@ int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm);
int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta,
- u32 qmask);
+ u32 qmask, enum nl80211_iftype iftype);
void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm,
struct iwl_mvm_int_sta *sta);
int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
@@ -375,7 +400,8 @@ void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
enum ieee80211_frame_release_type reason,
- u16 cnt);
+ u16 cnt, u16 tids, bool more_data,
+ bool agg);
int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
bool drain);
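
(Illustration, not part of the patch: the iwl_mvm_tid_queued() helper added above is modulo-4096 sequence-number arithmetic; the driver additionally extracts the 12-bit SN from the full sequence-control field. A simplified standalone userspace C sketch:)

#include <stdio.h>

#define SN_MASK 0xfff

static unsigned int sn_sub(unsigned int a, unsigned int b)
{
	/* 802.11 sequence numbers are 12 bits, so subtract modulo 4096 */
	return (a - b) & SN_MASK;
}

int main(void)
{
	/* next SN to be assigned vs. next frame still to be reclaimed */
	unsigned int next_sn = 5, next_reclaimed = 4090;

	/* wraps around 4096: (5 - 4090) & 0xfff == 11 frames in flight */
	printf("queued: %u\n", sn_sub(next_sn, next_reclaimed));
	return 0;
}
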
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index b4c2aba..e145dd4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -126,6 +126,7 @@ static void iwl_mvm_roc_finished(struct iwl_mvm *mvm)
* in iwl_mvm_te_handle_notif).
*/
clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ iwl_mvm_unref(mvm, IWL_MVM_REF_ROC);
/*
* Of course, our status bit is just as racy as mac80211, so in
@@ -210,6 +211,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) {
set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
+ iwl_mvm_ref(mvm, IWL_MVM_REF_ROC);
ieee80211_ready_on_channel(mvm->hw);
}
} else {
diff --git a/drivers/net/wireless/iwlwifi/mvm/tt.c b/drivers/net/wireless/iwlwifi/mvm/tt.c
index 3afa6b6..7a99fa3 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tt.c
@@ -403,7 +403,7 @@ static void iwl_mvm_tt_tx_protection(struct iwl_mvm *mvm, bool enable)
}
}
-static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
+void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
{
struct iwl_host_cmd cmd = {
.id = REPLY_THERMAL_MNG_BACKOFF,
@@ -412,6 +412,8 @@ static void iwl_mvm_tt_tx_backoff(struct iwl_mvm *mvm, u32 backoff)
.flags = CMD_SYNC,
};
+ backoff = max(backoff, mvm->thermal_throttle.min_backoff);
+
if (iwl_mvm_send_cmd(mvm, &cmd) == 0) {
IWL_DEBUG_TEMP(mvm, "Set Thermal Tx backoff to: %u\n",
backoff);
@@ -534,7 +536,7 @@ static const struct iwl_tt_params iwl7000_high_temp_tt_params = {
.support_tx_backoff = true,
};
-void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
+void iwl_mvm_tt_initialize(struct iwl_mvm *mvm, u32 min_backoff)
{
struct iwl_mvm_tt_mgmt *tt = &mvm->thermal_throttle;
@@ -546,6 +548,7 @@ void iwl_mvm_tt_initialize(struct iwl_mvm *mvm)
tt->params = &iwl7000_tt_params;
tt->throttle = false;
+ tt->min_backoff = min_backoff;
INIT_DELAYED_WORK(&tt->ct_kill_exit, check_exit_ctkill);
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 76ee486..98096fa 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -377,6 +377,13 @@ int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
/* From now on, we cannot access info->control */
+ /*
+ * We handle the EOSP signalling entirely ourselves -- for uAPSD the firmware
+ * will always send a notification, and for PS-Poll responses
+ * we'll notify mac80211 when getting frame status
+ */
+ info->flags &= ~IEEE80211_TX_STATUS_EOSP;
+
spin_lock(&mvmsta->lock);
if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
@@ -437,6 +444,17 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
lockdep_assert_held(&mvmsta->lock);
+ if ((tid_data->state == IWL_AGG_ON ||
+ tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
+ iwl_mvm_tid_queued(tid_data) == 0) {
+ /*
+ * Now that this aggregation queue is empty, tell mac80211 so it
+ * knows we no longer have frames buffered for the station on
+ * this TID (for the TIM bitmap calculation).
+ */
+ ieee80211_sta_set_buffered(sta, tid, false);
+ }
+
if (tid_data->ssn != tid_data->next_reclaimed)
return;
@@ -680,6 +698,11 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
iwl_mvm_check_ratid_empty(mvm, sta, tid);
spin_unlock_bh(&mvmsta->lock);
}
+
+ if (mvmsta->next_status_eosp) {
+ mvmsta->next_status_eosp = false;
+ ieee80211_sta_eosp(sta);
+ }
} else {
mvmsta = NULL;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index 86989df..1493f79 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -376,9 +376,67 @@ struct iwl_error_event_table {
u32 flow_handler; /* FH read/write pointers, RX credit */
} __packed;
+/*
+ * UMAC error struct - relevant starting from the 8000 family of devices.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwl_umac_error_event_table {
+ u32 valid; /* (nonzero) valid, (0) log is empty */
+ u32 error_id; /* type of error */
+ u32 pc; /* program counter */
+ u32 blink1; /* branch link */
+ u32 blink2; /* branch link */
+ u32 ilink1; /* interrupt link */
+ u32 ilink2; /* interrupt link */
+ u32 data1; /* error-specific data */
+ u32 data2; /* error-specific data */
+ u32 line; /* source code line of error */
+ u32 umac_ver; /* umac version */
+} __packed;
+
#define ERROR_START_OFFSET (1 * sizeof(u32))
#define ERROR_ELEM_SIZE (7 * sizeof(u32))
+static void iwl_mvm_dump_umac_error_log(struct iwl_mvm *mvm)
+{
+ struct iwl_trans *trans = mvm->trans;
+ struct iwl_umac_error_event_table table;
+ u32 base;
+
+ base = mvm->umac_error_event_table;
+
+ if (base < 0x800000 || base >= 0x80C000) {
+ IWL_ERR(mvm,
+ "Not valid error log pointer 0x%08X for %s uCode\n",
+ base,
+ (mvm->cur_ucode == IWL_UCODE_INIT)
+ ? "Init" : "RT");
+ return;
+ }
+
+ iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ IWL_ERR(trans, "Start IWL Error Log Dump:\n");
+ IWL_ERR(trans, "Status: 0x%08lX, count: %d\n",
+ mvm->status, table.valid);
+ }
+
+ IWL_ERR(mvm, "0x%08X | %-28s\n", table.error_id,
+ desc_lookup(table.error_id));
+ IWL_ERR(mvm, "0x%08X | umac uPc\n", table.pc);
+ IWL_ERR(mvm, "0x%08X | umac branchlink1\n", table.blink1);
+ IWL_ERR(mvm, "0x%08X | umac branchlink2\n", table.blink2);
+ IWL_ERR(mvm, "0x%08X | umac interruptlink1\n", table.ilink1);
+ IWL_ERR(mvm, "0x%08X | umac interruptlink2\n", table.ilink2);
+ IWL_ERR(mvm, "0x%08X | umac data1\n", table.data1);
+ IWL_ERR(mvm, "0x%08X | umac data2\n", table.data2);
+ IWL_ERR(mvm, "0x%08X | umac version\n", table.umac_ver);
+}
+
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
{
struct iwl_trans *trans = mvm->trans;
@@ -394,7 +452,7 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
base = mvm->fw->inst_errlog_ptr;
}
- if (base < 0x800000 || base >= 0x80C000) {
+ if (base < 0x800000) {
IWL_ERR(mvm,
"Not valid error log pointer 0x%08X for %s uCode\n",
base,
@@ -453,13 +511,17 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
IWL_ERR(mvm, "0x%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
IWL_ERR(mvm, "0x%08X | timestamp\n", table.u_timestamp);
IWL_ERR(mvm, "0x%08X | flow_handler\n", table.flow_handler);
+
+ if (mvm->support_umac_log)
+ iwl_mvm_dump_umac_error_log(mvm);
}
void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
{
const struct fw_img *img;
int ofs, len = 0;
- u8 *buf;
+ int i;
+ __le32 *buf;
if (!mvm->ucode_loaded)
return;
@@ -473,7 +535,12 @@ void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
return;
iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len);
- iwl_print_hex_error(mvm->trans, buf, len);
+ len = len >> 2;
+ for (i = 0; i < len; i++) {
+ IWL_ERR(mvm, "0x%08X\n", le32_to_cpu(buf[i]));
+ /* Add a small delay to let syslog catch up */
+ udelay(10);
+ }
kfree(buf);
}
@@ -516,7 +583,7 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_smps_mode smps_request)
{
struct iwl_mvm_vif *mvmvif;
- enum ieee80211_smps_mode smps_mode = IEEE80211_SMPS_AUTOMATIC;
+ enum ieee80211_smps_mode smps_mode;
int i;
lockdep_assert_held(&mvm->mutex);
@@ -525,6 +592,11 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
if (num_of_ant(iwl_fw_valid_rx_ant(mvm->fw)) == 1)
return;
+ if (vif->type == NL80211_IFTYPE_AP)
+ smps_mode = IEEE80211_SMPS_OFF;
+ else
+ smps_mode = IEEE80211_SMPS_AUTOMATIC;
+
mvmvif = iwl_mvm_vif_from_mac80211(vif);
mvmvif->smps_requests[req_type] = smps_request;
for (i = 0; i < NUM_IWL_MVM_SMPS_REQ; i++) {
@@ -538,3 +610,22 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
ieee80211_request_smps(vif, smps_mode);
}
+
+int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+ bool value)
+{
+ struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
+ int res;
+
+ lockdep_assert_held(&mvm->mutex);
+
+ mvmvif->low_latency = value;
+
+ res = iwl_mvm_update_quotas(mvm, NULL);
+ if (res)
+ return res;
+
+ iwl_mvm_bt_coex_vif_change(mvm);
+
+ return iwl_mvm_power_update_mac(mvm, vif);
+}
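
(Illustration, not part of the patch: the reworked iwl_mvm_dump_sram() above prints the SRAM buffer as little-endian 32-bit words instead of a hex dump; a standalone userspace C equivalent of that loop, minus the udelay() used in the kernel to let syslog keep up. Buffer contents are made up.)

#include <stdint.h>
#include <stdio.h>

static void dump_words_le32(const uint8_t *buf, size_t len)
{
	size_t i;

	for (i = 0; i + 4 <= len; i += 4) {
		/* portable equivalent of le32_to_cpu() on a byte buffer */
		uint32_t w = (uint32_t)buf[i] |
			     (uint32_t)buf[i + 1] << 8 |
			     (uint32_t)buf[i + 2] << 16 |
			     (uint32_t)buf[i + 3] << 24;

		printf("0x%08X\n", w);
	}
}

int main(void)
{
	const uint8_t sram[] = { 0x78, 0x56, 0x34, 0x12,
				 0xEF, 0xBE, 0xAD, 0xDE };

	dump_words_le32(sram, sizeof(sram));	/* 0x12345678, 0xDEADBEEF */
	return 0;
}
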
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index f47bcbe..0f52e96 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -66,6 +66,7 @@
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
+#include <linux/acpi.h>
#include "iwl-trans.h"
#include "iwl-drv.h"
@@ -390,12 +391,91 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
{IWL_PCI_DEVICE(0x095A, 0x5590, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
+
+/* 8000 Series */
+ {IWL_PCI_DEVICE(0x24F3, 0x0010, iwl8260_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x24F4, 0x0030, iwl8260_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
};
MODULE_DEVICE_TABLE(pci, iwl_hw_card_ids);
+#ifdef CONFIG_ACPI
+#define SPL_METHOD "SPLC"
+#define SPL_DOMAINTYPE_MODULE BIT(0)
+#define SPL_DOMAINTYPE_WIFI BIT(1)
+#define SPL_DOMAINTYPE_WIGIG BIT(2)
+#define SPL_DOMAINTYPE_RFEM BIT(3)
+
+static u64 splx_get_pwr_limit(struct iwl_trans *trans, union acpi_object *splx)
+{
+ union acpi_object *limits, *domain_type, *power_limit;
+
+ if (splx->type != ACPI_TYPE_PACKAGE ||
+ splx->package.count != 2 ||
+ splx->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ splx->package.elements[0].integer.value != 0) {
+ IWL_ERR(trans, "Unsupported splx structure");
+ return 0;
+ }
+
+ limits = &splx->package.elements[1];
+ if (limits->type != ACPI_TYPE_PACKAGE ||
+ limits->package.count < 2 ||
+ limits->package.elements[0].type != ACPI_TYPE_INTEGER ||
+ limits->package.elements[1].type != ACPI_TYPE_INTEGER) {
+ IWL_ERR(trans, "Invalid limits element");
+ return 0;
+ }
+
+ domain_type = &limits->package.elements[0];
+ power_limit = &limits->package.elements[1];
+ if (!(domain_type->integer.value & SPL_DOMAINTYPE_WIFI)) {
+ IWL_DEBUG_INFO(trans, "WiFi power is not limited");
+ return 0;
+ }
+
+ return power_limit->integer.value;
+}
+
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
+{
+ acpi_handle pxsx_handle;
+ acpi_handle handle;
+ struct acpi_buffer splx = {ACPI_ALLOCATE_BUFFER, NULL};
+ acpi_status status;
+
+ pxsx_handle = ACPI_HANDLE(&pdev->dev);
+ if (!pxsx_handle) {
+ IWL_ERR(trans, "Could not retrieve root port ACPI handle");
+ return;
+ }
+
+ /* Get the method's handle */
+ status = acpi_get_handle(pxsx_handle, (acpi_string)SPL_METHOD, &handle);
+ if (ACPI_FAILURE(status)) {
+ IWL_DEBUG_INFO(trans, "SPL method not found");
+ return;
+ }
+
+ /* Call SPLC with no arguments */
+ status = acpi_evaluate_object(handle, NULL, NULL, &splx);
+ if (ACPI_FAILURE(status)) {
+ IWL_ERR(trans, "SPLC invocation failed (0x%x)", status);
+ return;
+ }
+
+ trans->dflt_pwr_limit = splx_get_pwr_limit(trans, splx.pointer);
+ IWL_DEBUG_INFO(trans, "Default power limit set to %lld",
+ trans->dflt_pwr_limit);
+ kfree(splx.pointer);
+}
+
+#else /* CONFIG_ACPI */
+static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev) {}
+#endif
+
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
@@ -420,6 +500,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_free_trans;
}
+ set_dflt_pwr_limit(iwl_trans, pdev);
+
/* register transport layer debugfs here */
ret = iwl_trans_dbgfs_register(iwl_trans, iwl_trans->dbgfs_dir);
if (ret)
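
(Illustration, not part of the patch: splx_get_pwr_limit() above expects an SPLx package of the form { integer 0, package { domain_type, power_limit, ... } } and only uses the limit when the WiFi domain bit is set. The following standalone userspace C sketch mirrors that last check over a mock structure instead of real ACPI objects; the value is made up.)

#include <stdint.h>
#include <stdio.h>

#define SPL_DOMAINTYPE_WIFI (1u << 1)

/* stand-in for the inner ACPI package: { domain_type, power_limit } */
struct mock_limits {
	uint64_t domain_type;
	uint64_t power_limit;
};

static uint64_t wifi_pwr_limit(const struct mock_limits *limits)
{
	if (!(limits->domain_type & SPL_DOMAINTYPE_WIFI))
		return 0;	/* WiFi power is not limited */
	return limits->power_limit;
}

int main(void)
{
	struct mock_limits limits = {
		.domain_type = SPL_DOMAINTYPE_WIFI,
		.power_limit = 1500,	/* made-up value */
	};

	printf("default power limit: %llu\n",
	       (unsigned long long)wifi_pwr_limit(&limits));
	return 0;
}
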
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 08c23d4..41f684d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -802,10 +802,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 inta;
- lockdep_assert_held(&trans_pcie->irq_lock);
+ lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
trace_iwlwifi_dev_irq(trans->dev);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index f950780..84d4712 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -89,6 +89,7 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT 0x041
+#define CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
@@ -132,8 +133,9 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
*/
/* Disable L0S exit timer (platform NMI Work/Around) */
- iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
- CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
+ CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
/*
* Disable L0s without affecting L1;
@@ -203,19 +205,23 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
/*
* Enable DMA clock and wait for it to stabilize.
*
- * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
- * do not disable clocks. This preserves any hardware bits already
- * set by default in "CLK_CTRL_REG" after reset.
+ * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
+ * bits do not disable clocks. This preserves any hardware
+ * bits already set by default in "CLK_CTRL_REG" after reset.
*/
- iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(20);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+ iwl_write_prph(trans, APMG_CLK_EN_REG,
+ APMG_CLK_VAL_DMA_CLK_RQT);
+ udelay(20);
- /* Disable L1-Active */
- iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ /* Disable L1-Active */
+ iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
- /* Clear the interrupt in APMG if the NIC is in RFKILL */
- iwl_write_prph(trans, APMG_RTC_INT_STT_REG, APMG_RTC_INT_STT_RFKILL);
+ /* Clear the interrupt in APMG if the NIC is in RFKILL */
+ iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
+ APMG_RTC_INT_STT_RFKILL);
+ }
set_bit(STATUS_DEVICE_ENABLED, &trans->status);
@@ -273,7 +279,8 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
spin_unlock(&trans_pcie->irq_lock);
- iwl_pcie_set_pwr(trans, false);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_pcie_set_pwr(trans, false);
iwl_op_mode_nic_config(trans->op_mode);
@@ -435,78 +442,106 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
return ret;
}
-static int iwl_pcie_secure_set(struct iwl_trans *trans, int cpu)
+static int iwl_pcie_load_cpu_secured_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
{
int shift_param;
- u32 address;
- int ret = 0;
+ int i, ret = 0;
+ u32 last_read_idx = 0;
if (cpu == 1) {
shift_param = 0;
- address = CSR_SECURE_BOOT_CPU1_STATUS_ADDR;
+ *first_ucode_section = 0;
} else {
shift_param = 16;
- address = CSR_SECURE_BOOT_CPU2_STATUS_ADDR;
+ (*first_ucode_section)++;
}
- /* set CPU to started */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_LOADING_STARTED << shift_param,
- 1);
-
- /* set last complete descriptor number */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED
- << shift_param,
- 1);
-
- /* set last loaded block */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK
- << shift_param,
- 1);
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
+
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ if (i == (*first_ucode_section) + 1)
+ /* set CPU to started */
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ LMPM_CPU_HDRS_LOADING_COMPLETED
+ << shift_param);
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
+ }
/* image loading complete */
- iwl_trans_set_bits_mask(trans,
- CSR_UCODE_LOAD_STATUS_ADDR,
- CSR_CPU_STATUS_LOADING_COMPLETED
- << shift_param,
- 1);
-
- /* set FH_TCSR_0_REG */
- iwl_trans_set_bits_mask(trans, FH_TCSR_0_REG0, 0x00400000, 1);
-
- /* verify image verification started */
- ret = iwl_poll_bit(trans, address,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_STATUS,
- CSR_SECURE_TIME_OUT);
- if (ret < 0) {
- IWL_ERR(trans, "secure boot process didn't start\n");
- return ret;
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ LMPM_CPU_UCODE_LOADING_COMPLETED << shift_param);
+
+ *first_ucode_section = last_read_idx;
+
+ return 0;
+}
+
+static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
+ const struct fw_img *image,
+ int cpu,
+ int *first_ucode_section)
+{
+ int shift_param;
+ int i, ret = 0;
+ u32 last_read_idx = 0;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ *first_ucode_section = 0;
+ } else {
+ shift_param = 16;
+ (*first_ucode_section)++;
}
- /* wait for image verification to complete */
- ret = iwl_poll_bit(trans, address,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
- CSR_SECURE_BOOT_CPU_STATUS_VERF_COMPLETED,
- CSR_SECURE_TIME_OUT);
+ for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
+ last_read_idx = i;
- if (ret < 0) {
- IWL_ERR(trans, "Time out on secure boot process\n");
- return ret;
+ if (!image->sec[i].data ||
+ image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION) {
+ IWL_DEBUG_FW(trans,
+ "Break since Data not valid or Empty section, sec = %d\n",
+ i);
+ break;
+ }
+
+ ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ if (ret)
+ return ret;
}
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_set_bits_prph(trans,
+ CSR_UCODE_LOAD_STATUS_ADDR,
+ (LMPM_CPU_UCODE_LOADING_COMPLETED |
+ LMPM_CPU_HDRS_LOADING_COMPLETED |
+ LMPM_CPU_UCODE_LOADING_STARTED) <<
+ shift_param);
+
+ *first_ucode_section = last_read_idx;
+
return 0;
}
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
const struct fw_img *image)
{
- int i, ret = 0;
+ int ret = 0;
+ int first_ucode_section;
IWL_DEBUG_FW(trans,
"working with %s image\n",
@@ -518,53 +553,68 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
/* configure the ucode to be ready to get the secured image */
if (image->is_secure) {
/* set secure boot inspector addresses */
- iwl_write32(trans, CSR_SECURE_INSPECTOR_CODE_ADDR, 0);
- iwl_write32(trans, CSR_SECURE_INSPECTOR_DATA_ADDR, 0);
-
- /* release CPU1 reset if secure inspector image burned in OTP */
- iwl_write32(trans, CSR_RESET, 0);
- }
-
- /* load to FW the binary sections of CPU1 */
- IWL_DEBUG_INFO(trans, "Loading CPU1\n");
- for (i = 0;
- i < IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
- i++) {
- if (!image->sec[i].data)
- break;
- ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
+ iwl_write_prph(trans,
+ LMPM_SECURE_INSPECTOR_CODE_ADDR,
+ LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE);
+
+ iwl_write_prph(trans,
+ LMPM_SECURE_INSPECTOR_DATA_ADDR,
+ LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE);
+
+ /* set CPU1 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR,
+ LMPM_SECURE_CPU1_HDR_MEM_SPACE);
+
+ /* load to FW the binary Secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_secured_sections(trans, image, 1,
+ &first_ucode_section);
if (ret)
return ret;
- }
- /* configure the ucode to start secure process on CPU1 */
- if (image->is_secure) {
- /* config CPU1 to start secure protocol */
- ret = iwl_pcie_secure_set(trans, 1);
+ } else {
+ /* load to FW the binary Non secured sections of CPU1 */
+ ret = iwl_pcie_load_cpu_sections(trans, image, 1,
+ &first_ucode_section);
if (ret)
return ret;
- } else {
- /* Remove all resets to allow NIC to operate */
- iwl_write32(trans, CSR_RESET, 0);
}
if (image->is_dual_cpus) {
+ /* set CPU2 header address */
+ iwl_write_prph(trans,
+ LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
+ LMPM_SECURE_CPU2_HDR_MEM_SPACE);
+
/* load to FW the binary sections of CPU2 */
- IWL_DEBUG_INFO(trans, "working w/ DUAL CPUs - Loading CPU2\n");
- for (i = IWL_UCODE_FIRST_SECTION_OF_SECOND_CPU;
- i < IWL_UCODE_SECTION_MAX; i++) {
- if (!image->sec[i].data)
- break;
- ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
- if (ret)
- return ret;
- }
+ if (image->is_secure)
+ ret = iwl_pcie_load_cpu_secured_sections(
+ trans, image, 2,
+ &first_ucode_section);
+ else
+ ret = iwl_pcie_load_cpu_sections(trans, image, 2,
+ &first_ucode_section);
+ if (ret)
+ return ret;
+ }
- if (image->is_secure) {
- /* set CPU2 for secure protocol */
- ret = iwl_pcie_secure_set(trans, 2);
- if (ret)
- return ret;
+ /* release CPU reset */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);
+ else
+ iwl_write32(trans, CSR_RESET, 0);
+
+ if (image->is_secure) {
+ /* wait for image verification to complete */
+ ret = iwl_poll_prph_bit(trans,
+ LMPM_SECURE_BOOT_CPU1_STATUS_ADDR,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_BOOT_STATUS_SUCCESS,
+ LMPM_SECURE_TIME_OUT);
+
+ if (ret < 0) {
+ IWL_ERR(trans, "Time out on secure boot process\n");
+ return ret;
}
}
@@ -1407,16 +1457,15 @@ static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
{
struct iwl_trans *trans = file->private_data;
char *buf = NULL;
- int pos = 0;
- ssize_t ret = -EFAULT;
-
- ret = pos = iwl_dump_fh(trans, &buf);
- if (buf) {
- ret = simple_read_from_buffer(user_buf,
- count, ppos, buf, pos);
- kfree(buf);
- }
+ ssize_t ret;
+ ret = iwl_dump_fh(trans, &buf);
+ if (ret < 0)
+ return ret;
+ if (!buf)
+ return -EINVAL;
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
+ kfree(buf);
return ret;
}
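
(Illustration, not part of the patch: the new iwl_pcie_load_cpu_sections() helpers above consume firmware sections in order until an empty section or the CPU1_CPU2_SEPARATOR_SECTION marker, and CPU2 resumes after the separator. A standalone userspace C sketch of that split; section offsets and names are made up.)

#include <stdint.h>
#include <stdio.h>

#define SEPARATOR    0xFFFFCCCC	/* CPU1_CPU2_SEPARATOR_SECTION */
#define MAX_SECTIONS 8

struct section {
	uint32_t offset;
	const char *data;	/* NULL marks the end of the image */
};

static int load_cpu_sections(const struct section *sec, int first, int cpu)
{
	int i;

	for (i = first; i < MAX_SECTIONS; i++) {
		if (!sec[i].data || sec[i].offset == SEPARATOR)
			break;
		printf("CPU%d: loading section %d at 0x%08X\n",
		       cpu, i, (unsigned int)sec[i].offset);
	}
	return i;	/* CPU2 resumes after the separator entry */
}

int main(void)
{
	const struct section img[MAX_SECTIONS] = {
		{ 0x00400000, "cpu1-code" },
		{ 0x00410000, "cpu1-data" },
		{ SEPARATOR,  "separator" },
		{ 0x00800000, "cpu2-code" },
		{ 0, NULL },
	};
	int idx = load_cpu_sections(img, 0, 1);

	load_cpu_sections(img, idx + 1, 2);
	return 0;
}
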
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 3d54900..2541264 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -705,8 +705,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Enable L1-Active */
- iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
- APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
+ iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
+ APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index cb6d189..54e344a 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -1766,7 +1766,8 @@ static void lbs_join_post(struct lbs_private *priv,
memcpy(priv->wdev->ssid, params->ssid, params->ssid_len);
priv->wdev->ssid_len = params->ssid_len;
- cfg80211_ibss_joined(priv->dev, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(priv->dev, bssid, params->chandef.chan,
+ GFP_KERNEL);
/* TODO: consider doing this at MACREG_INT_CODE_LINK_SENSED time */
priv->connect_status = LBS_CONNECTED;
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 69d4c31..f7e3562 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -57,6 +57,10 @@ static bool rctbl = false;
module_param(rctbl, bool, 0444);
MODULE_PARM_DESC(rctbl, "Handle rate control table");
+static bool support_p2p_device = true;
+module_param(support_p2p_device, bool, 0444);
+MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type");
+
/**
* enum hwsim_regtest - the type of regulatory tests we offer
*
@@ -335,7 +339,8 @@ static const struct ieee80211_iface_limit hwsim_if_limits[] = {
#endif
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO) },
- { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) },
+ /* must be last, see hwsim_if_comb */
+ { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) }
};
static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
@@ -345,6 +350,27 @@ static const struct ieee80211_iface_limit hwsim_if_dfs_limits[] = {
static const struct ieee80211_iface_combination hwsim_if_comb[] = {
{
.limits = hwsim_if_limits,
+ /* remove the last entry which is P2P_DEVICE */
+ .n_limits = ARRAY_SIZE(hwsim_if_limits) - 1,
+ .max_interfaces = 2048,
+ .num_different_channels = 1,
+ },
+ {
+ .limits = hwsim_if_dfs_limits,
+ .n_limits = ARRAY_SIZE(hwsim_if_dfs_limits),
+ .max_interfaces = 8,
+ .num_different_channels = 1,
+ .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
+ BIT(NL80211_CHAN_WIDTH_20) |
+ BIT(NL80211_CHAN_WIDTH_40) |
+ BIT(NL80211_CHAN_WIDTH_80) |
+ BIT(NL80211_CHAN_WIDTH_160),
+ }
+};
+
+static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = {
+ {
+ .limits = hwsim_if_limits,
.n_limits = ARRAY_SIZE(hwsim_if_limits),
.max_interfaces = 2048,
.num_different_channels = 1,
@@ -451,7 +477,7 @@ static struct genl_family hwsim_genl_family = {
/* MAC80211_HWSIM netlink policy */
-static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
+static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_ADDR_RECEIVER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
[HWSIM_ATTR_ADDR_TRANSMITTER] = { .type = NLA_UNSPEC, .len = ETH_ALEN },
[HWSIM_ATTR_FRAME] = { .type = NLA_BINARY,
@@ -468,6 +494,7 @@ static struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = {
[HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 },
[HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 },
[HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG },
+ [HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG },
};
static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
@@ -1035,32 +1062,6 @@ static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw,
ack = true;
rx_status.mactime = now + data2->tsf_offset;
-#if 0
- /*
- * Don't enable this code by default as the OUI 00:00:00
- * is registered to Xerox so we shouldn't use it here, it
- * might find its way into pcap files.
- * Note that this code requires the headroom in the SKB
- * that was allocated earlier.
- */
- rx_status.vendor_radiotap_oui[0] = 0x00;
- rx_status.vendor_radiotap_oui[1] = 0x00;
- rx_status.vendor_radiotap_oui[2] = 0x00;
- rx_status.vendor_radiotap_subns = 127;
- /*
- * Radiotap vendor namespaces can (and should) also be
- * split into fields by using the standard radiotap
- * presence bitmap mechanism. Use just BIT(0) here for
- * the presence bitmap.
- */
- rx_status.vendor_radiotap_bitmap = BIT(0);
- /* We have 8 bytes of (dummy) data */
- rx_status.vendor_radiotap_len = 8;
- /* For testing, also require it to be aligned */
- rx_status.vendor_radiotap_align = 8;
- /* push the data */
- memcpy(skb_push(nskb, 8), "ABCDEFGH", 8);
-#endif
memcpy(IEEE80211_SKB_RXCB(nskb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(data2->hw, nskb);
@@ -1275,6 +1276,9 @@ static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac,
mac80211_hwsim_tx_frame(hw, skb,
rcu_dereference(vif->chanctx_conf)->def.chan);
+
+ if (vif->csa_active && ieee80211_csa_is_complete(vif))
+ ieee80211_csa_finish(vif);
}
static enum hrtimer_restart
@@ -1936,7 +1940,7 @@ static struct ieee80211_ops mac80211_hwsim_mchan_ops;
static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
const struct ieee80211_regdomain *regd,
- bool reg_strict)
+ bool reg_strict, bool p2p_device)
{
int err;
u8 addr[ETH_ALEN];
@@ -2000,8 +2004,15 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
/* For channels > 1 DFS is not allowed */
hw->wiphy->n_iface_combinations = 1;
hw->wiphy->iface_combinations = &data->if_combination;
- data->if_combination = hwsim_if_comb[0];
data->if_combination.num_different_channels = data->channels;
+ if (p2p_device)
+ data->if_combination = hwsim_if_comb_p2p_dev[0];
+ else
+ data->if_combination = hwsim_if_comb[0];
+ } else if (p2p_device) {
+ hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev;
+ hw->wiphy->n_iface_combinations =
+ ARRAY_SIZE(hwsim_if_comb_p2p_dev);
} else {
hw->wiphy->iface_combinations = hwsim_if_comb;
hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb);
@@ -2017,8 +2028,10 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
BIT(NL80211_IFTYPE_P2P_CLIENT) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_ADHOC) |
- BIT(NL80211_IFTYPE_MESH_POINT) |
- BIT(NL80211_IFTYPE_P2P_DEVICE);
+ BIT(NL80211_IFTYPE_MESH_POINT);
+
+ if (p2p_device)
+ hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE);
hw->flags = IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_SIGNAL_DBM |
@@ -2027,13 +2040,15 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_WANT_MONITOR_VIF |
IEEE80211_HW_QUEUE_CONTROL |
- IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ IEEE80211_HW_SUPPORTS_HT_CCK_RATES |
+ IEEE80211_HW_CHANCTX_STA_CSA;
if (rctbl)
hw->flags |= IEEE80211_HW_SUPPORTS_RC_TABLE;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
- WIPHY_FLAG_AP_UAPSD;
+ WIPHY_FLAG_AP_UAPSD |
+ WIPHY_FLAG_HAS_CHANNEL_SWITCH;
hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR;
/* ask mac80211 to reserve space for magic */
@@ -2407,6 +2422,7 @@ static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
const char *alpha2 = NULL;
const struct ieee80211_regdomain *regd = NULL;
bool reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG];
+ bool p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE];
if (info->attrs[HWSIM_ATTR_CHANNELS])
chans = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]);
@@ -2422,7 +2438,8 @@ static int hwsim_create_radio_nl(struct sk_buff *msg, struct genl_info *info)
regd = hwsim_world_regdom_custom[idx];
}
- return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict);
+ return mac80211_hwsim_create_radio(chans, alpha2, regd, reg_strict,
+ p2p_device);
}
static int hwsim_destroy_radio_nl(struct sk_buff *msg, struct genl_info *info)
@@ -2640,7 +2657,8 @@ static int __init init_mac80211_hwsim(void)
}
err = mac80211_hwsim_create_radio(channels, reg_alpha2,
- regd, reg_strict);
+ regd, reg_strict,
+ support_p2p_device);
if (err < 0)
goto out_free_radios;
}
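
(Illustration, not part of the patch: the hwsim change above keeps a single limits array with the P2P_DEVICE entry last and exposes two combination variants by differing only in n_limits. A minimal standalone userspace C sketch of that trick; the struct and entries are mocked, not the real mac80211 types.)

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct limit { const char *types; int max; };

/* P2P_DEVICE must stay last so it can be dropped via ARRAY_SIZE() - 1 */
static const struct limit limits[] = {
	{ "STA/AP/P2P_GO", 2048 },
	{ "P2P_DEVICE",    1 },
};

struct combination { const struct limit *limits; unsigned int n_limits; };

int main(void)
{
	struct combination without_p2p_dev = { limits, ARRAY_SIZE(limits) - 1 };
	struct combination with_p2p_dev = { limits, ARRAY_SIZE(limits) };

	printf("%u vs %u limit entries\n",
	       without_p2p_dev.n_limits, with_p2p_dev.n_limits);
	return 0;
}
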
diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h
index 2747cce..6e72996 100644
--- a/drivers/net/wireless/mac80211_hwsim.h
+++ b/drivers/net/wireless/mac80211_hwsim.h
@@ -107,6 +107,7 @@ enum {
* (nla string, length 2)
* @HWSIM_ATTR_REG_CUSTOM_REG: custom regulatory domain index (u32 attribute)
* @HWSIM_ATTR_REG_STRICT_REG: request REGULATORY_STRICT_REG (flag attribute)
+ * @HWSIM_ATTR_SUPPORT_P2P_DEVICE: support P2P Device virtual interface (flag)
* @__HWSIM_ATTR_MAX: enum limit
*/
@@ -126,6 +127,7 @@ enum {
HWSIM_ATTR_REG_HINT_ALPHA2,
HWSIM_ATTR_REG_CUSTOM_REG,
HWSIM_ATTR_REG_STRICT_REG,
+ HWSIM_ATTR_SUPPORT_P2P_DEVICE,
__HWSIM_ATTR_MAX,
};
#define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1)
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index 5e0eec4..bb43251 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -23,6 +23,31 @@
#include "main.h"
#include "11ac.h"
+/* Tables mapping the VHT MCS map to the highest data rate (in Mbps)
+ * supported for long GI.
+ */
+static const u16 max_rate_lgi_80MHZ[8][3] = {
+ {0x124, 0x15F, 0x186}, /* NSS = 1 */
+ {0x249, 0x2BE, 0x30C}, /* NSS = 2 */
+ {0x36D, 0x41D, 0x492}, /* NSS = 3 */
+ {0x492, 0x57C, 0x618}, /* NSS = 4 */
+ {0x5B6, 0x6DB, 0x79E}, /* NSS = 5 */
+ {0x6DB, 0x83A, 0x0}, /* NSS = 6 */
+ {0x7FF, 0x999, 0xAAA}, /* NSS = 7 */
+ {0x924, 0xAF8, 0xC30} /* NSS = 8 */
+};
+
+static const u16 max_rate_lgi_160MHZ[8][3] = {
+ {0x249, 0x2BE, 0x30C}, /* NSS = 1 */
+ {0x492, 0x57C, 0x618}, /* NSS = 2 */
+ {0x6DB, 0x83A, 0x0}, /* NSS = 3 */
+ {0x924, 0xAF8, 0xC30}, /* NSS = 4 */
+ {0xB6D, 0xDB6, 0xF3C}, /* NSS = 5 */
+ {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */
+ {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */
+ {0x1248, 0x15F0, 0x1860} /* NSS = 8 */
+};
+
/* This function converts the 2-bit MCS map to the highest long GI
* VHT data rate.
*/
@@ -30,33 +55,10 @@ static u16
mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
u8 bands, u16 mcs_map)
{
- u8 i, nss, max_mcs;
+ u8 i, nss, mcs;
u16 max_rate = 0;
u32 usr_vht_cap_info = 0;
struct mwifiex_adapter *adapter = priv->adapter;
- /* tables of the MCS map to the highest data rate (in Mbps)
- * supported for long GI
- */
- u16 max_rate_lgi_80MHZ[8][3] = {
- {0x124, 0x15F, 0x186}, /* NSS = 1 */
- {0x249, 0x2BE, 0x30C}, /* NSS = 2 */
- {0x36D, 0x41D, 0x492}, /* NSS = 3 */
- {0x492, 0x57C, 0x618}, /* NSS = 4 */
- {0x5B6, 0x6DB, 0x79E}, /* NSS = 5 */
- {0x6DB, 0x83A, 0x0}, /* NSS = 6 */
- {0x7FF, 0x999, 0xAAA}, /* NSS = 7 */
- {0x924, 0xAF8, 0xC30} /* NSS = 8 */
- };
- u16 max_rate_lgi_160MHZ[8][3] = {
- {0x249, 0x2BE, 0x30C}, /* NSS = 1 */
- {0x492, 0x57C, 0x618}, /* NSS = 2 */
- {0x6DB, 0x83A, 0x0}, /* NSS = 3 */
- {0x924, 0xAF8, 0xC30}, /* NSS = 4 */
- {0xB6D, 0xDB6, 0xF3C}, /* NSS = 5 */
- {0xDB6, 0x1074, 0x1248}, /* NSS = 6 */
- {0xFFF, 0x1332, 0x1554}, /* NSS = 7 */
- {0x1248, 0x15F0, 0x1860} /* NSS = 8 */
- };
if (bands & BAND_AAC)
usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
@@ -64,29 +66,29 @@ mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
/* find the max NSS supported */
- nss = 0;
- for (i = 0; i < 8; i++) {
- max_mcs = (mcs_map >> (2 * i)) & 0x3;
- if (max_mcs < 3)
+ nss = 1;
+ for (i = 1; i <= 8; i++) {
+ mcs = GET_VHTNSSMCS(mcs_map, i);
+ if (mcs < IEEE80211_VHT_MCS_NOT_SUPPORTED)
nss = i;
}
- max_mcs = (mcs_map >> (2 * nss)) & 0x3;
+ mcs = GET_VHTNSSMCS(mcs_map, nss);
- /* if max_mcs is 3, nss must be 0 (SS = 1). Thus, max mcs is MCS 9 */
- if (max_mcs >= 3)
- max_mcs = 2;
+ /* if the MCS is marked not supported, NSS must be 1; default to MCS 0-9 */
+ if (mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED)
+ mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
if (GET_VHTCAP_CHWDSET(usr_vht_cap_info)) {
/* support 160 MHz */
- max_rate = max_rate_lgi_160MHZ[nss][max_mcs];
+ max_rate = max_rate_lgi_160MHZ[nss - 1][mcs];
if (!max_rate)
/* MCS9 is not supported in NSS6 */
- max_rate = max_rate_lgi_160MHZ[nss][max_mcs - 1];
+ max_rate = max_rate_lgi_160MHZ[nss - 1][mcs - 1];
} else {
- max_rate = max_rate_lgi_80MHZ[nss][max_mcs];
+ max_rate = max_rate_lgi_80MHZ[nss - 1][mcs];
if (!max_rate)
/* MCS9 is not supported in NSS3 */
- max_rate = max_rate_lgi_80MHZ[nss][max_mcs - 1];
+ max_rate = max_rate_lgi_80MHZ[nss - 1][mcs - 1];
}
return max_rate;
@@ -94,21 +96,20 @@ mwifiex_convert_mcsmap_to_maxrate(struct mwifiex_private *priv,
static void
mwifiex_fill_vht_cap_info(struct mwifiex_private *priv,
- struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands)
+ struct ieee80211_vht_cap *vht_cap, u8 bands)
{
struct mwifiex_adapter *adapter = priv->adapter;
if (bands & BAND_A)
- vht_cap->vht_cap.vht_cap_info =
+ vht_cap->vht_cap_info =
cpu_to_le32(adapter->usr_dot_11ac_dev_cap_a);
else
- vht_cap->vht_cap.vht_cap_info =
+ vht_cap->vht_cap_info =
cpu_to_le32(adapter->usr_dot_11ac_dev_cap_bg);
}
-static void
-mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
- struct mwifiex_ie_types_vhtcap *vht_cap, u8 bands)
+void mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
+ struct ieee80211_vht_cap *vht_cap, u8 bands)
{
struct mwifiex_adapter *adapter = priv->adapter;
u16 mcs_map_user, mcs_map_resp, mcs_map_result;
@@ -119,46 +120,48 @@ mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
/* rx MCS Set: find the minimum of the user rx mcs and ap rx mcs */
mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
- mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.rx_mcs_map);
+ mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.rx_mcs_map);
mcs_map_result = 0;
for (nss = 1; nss <= 8; nss++) {
mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
- if ((mcs_user == NO_NSS_SUPPORT) ||
- (mcs_resp == NO_NSS_SUPPORT))
- SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT);
+ if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+ (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED);
else
SET_VHTNSSMCS(mcs_map_result, nss,
min(mcs_user, mcs_resp));
}
- vht_cap->vht_cap.supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result);
+ vht_cap->supp_mcs.rx_mcs_map = cpu_to_le16(mcs_map_result);
tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
- vht_cap->vht_cap.supp_mcs.rx_highest = cpu_to_le16(tmp);
+ vht_cap->supp_mcs.rx_highest = cpu_to_le16(tmp);
/* tx MCS Set: find the minimum of the user tx mcs and ap tx mcs */
mcs_map_user = GET_DEVTXMCSMAP(adapter->usr_dot_11ac_mcs_support);
- mcs_map_resp = le16_to_cpu(vht_cap->vht_cap.supp_mcs.tx_mcs_map);
+ mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.tx_mcs_map);
mcs_map_result = 0;
for (nss = 1; nss <= 8; nss++) {
mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
- if ((mcs_user == NO_NSS_SUPPORT) ||
- (mcs_resp == NO_NSS_SUPPORT))
- SET_VHTNSSMCS(mcs_map_result, nss, NO_NSS_SUPPORT);
+ if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+ (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED);
else
SET_VHTNSSMCS(mcs_map_result, nss,
min(mcs_user, mcs_resp));
}
- vht_cap->vht_cap.supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result);
+ vht_cap->supp_mcs.tx_mcs_map = cpu_to_le16(mcs_map_result);
tmp = mwifiex_convert_mcsmap_to_maxrate(priv, bands, mcs_map_result);
- vht_cap->vht_cap.supp_mcs.tx_highest = cpu_to_le16(tmp);
+ vht_cap->supp_mcs.tx_highest = cpu_to_le16(tmp);
return;
}
@@ -193,7 +196,8 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
sizeof(struct ieee_types_header),
le16_to_cpu(vht_cap->header.len));
- mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
+ mwifiex_fill_vht_cap_tlv(priv, &vht_cap->vht_cap,
+ bss_desc->bss_band);
*buffer += sizeof(*vht_cap);
ret_len += sizeof(*vht_cap);
}
@@ -300,3 +304,81 @@ void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv)
return;
}
+
+bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv)
+{
+ struct mwifiex_bssdescriptor *bss_desc;
+ struct ieee80211_vht_operation *vht_oper;
+
+ bss_desc = &priv->curr_bss_params.bss_descriptor;
+ vht_oper = bss_desc->bcn_vht_oper;
+
+ if (!bss_desc->bcn_vht_cap || !vht_oper)
+ return false;
+
+ if (vht_oper->chan_width == IEEE80211_VHT_CHANWIDTH_USE_HT)
+ return false;
+
+ return true;
+}
+
+u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
+ u32 pri_chan, u8 chan_bw)
+{
+ u8 center_freq_idx = 0;
+
+ if (band & BAND_AAC) {
+ switch (pri_chan) {
+ case 36:
+ case 40:
+ case 44:
+ case 48:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 42;
+ break;
+ case 52:
+ case 56:
+ case 60:
+ case 64:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 58;
+ else if (chan_bw == IEEE80211_VHT_CHANWIDTH_160MHZ)
+ center_freq_idx = 50;
+ break;
+ case 100:
+ case 104:
+ case 108:
+ case 112:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 106;
+ break;
+ case 116:
+ case 120:
+ case 124:
+ case 128:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 122;
+ else if (chan_bw == IEEE80211_VHT_CHANWIDTH_160MHZ)
+ center_freq_idx = 114;
+ break;
+ case 132:
+ case 136:
+ case 140:
+ case 144:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 138;
+ break;
+ case 149:
+ case 153:
+ case 157:
+ case 161:
+ if (chan_bw == IEEE80211_VHT_CHANWIDTH_80MHZ)
+ center_freq_idx = 155;
+ break;
+ default:
+ center_freq_idx = 42;
+ }
+ }
+
+ return center_freq_idx;
+}
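
(Illustration, not part of the patch: the reworked NSS search in mwifiex_convert_mcsmap_to_maxrate() above walks a 16-bit VHT MCS map, 2 bits per spatial stream, where value 3 means "not supported". A standalone userspace C sketch of that walk; the helper name and the example map are made up, mirroring what GET_VHTNSSMCS() does in the driver.)

#include <stdint.h>
#include <stdio.h>

#define VHT_MCS_NOT_SUPPORTED 3

/* 2 bits per spatial stream, nss is 1-based */
static unsigned int nss_mcs(uint16_t mcs_map, unsigned int nss)
{
	return (mcs_map >> (2 * (nss - 1))) & 0x3;
}

int main(void)
{
	uint16_t mcs_map = 0xfffa;	/* NSS 1-2: MCS 0-9, NSS 3-8: unsupported */
	unsigned int nss, max_nss = 1;

	for (nss = 1; nss <= 8; nss++)
		if (nss_mcs(mcs_map, nss) < VHT_MCS_NOT_SUPPORTED)
			max_nss = nss;

	/* prints: max NSS 2, MCS cap 2 */
	printf("max NSS %u, MCS cap %u\n", max_nss, nss_mcs(mcs_map, max_nss));
	return 0;
}
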
diff --git a/drivers/net/wireless/mwifiex/11ac.h b/drivers/net/wireless/mwifiex/11ac.h
index 7c2c69b..0b02cb6 100644
--- a/drivers/net/wireless/mwifiex/11ac.h
+++ b/drivers/net/wireless/mwifiex/11ac.h
@@ -40,4 +40,6 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
int mwifiex_cmd_11ac_cfg(struct mwifiex_private *priv,
struct host_cmd_ds_command *cmd, u16 cmd_action,
struct mwifiex_11ac_vht_cfg *cfg);
+void mwifiex_fill_vht_cap_tlv(struct mwifiex_private *priv,
+ struct ieee80211_vht_cap *vht_cap, u8 bands);
#endif /* _MWIFIEX_11AC_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 6261f8c..37677af 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -34,22 +34,26 @@
*
* RD responder bit to set to clear in the extended capability header.
*/
-void
-mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
- struct mwifiex_ie_types_htcap *ht_cap)
+int mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
+ struct ieee80211_ht_cap *ht_cap)
{
- uint16_t ht_ext_cap = le16_to_cpu(ht_cap->ht_cap.extended_ht_cap_info);
+ uint16_t ht_ext_cap = le16_to_cpu(ht_cap->extended_ht_cap_info);
struct ieee80211_supported_band *sband =
priv->wdev->wiphy->bands[radio_type];
- ht_cap->ht_cap.ampdu_params_info =
+ if (WARN_ON_ONCE(!sband)) {
+ dev_err(priv->adapter->dev, "Invalid radio type!\n");
+ return -EINVAL;
+ }
+
+ ht_cap->ampdu_params_info =
(sband->ht_cap.ampdu_factor &
IEEE80211_HT_AMPDU_PARM_FACTOR) |
((sband->ht_cap.ampdu_density <<
IEEE80211_HT_AMPDU_PARM_DENSITY_SHIFT) &
IEEE80211_HT_AMPDU_PARM_DENSITY);
- memcpy((u8 *) &ht_cap->ht_cap.mcs, &sband->ht_cap.mcs,
+ memcpy((u8 *)&ht_cap->mcs, &sband->ht_cap.mcs,
sizeof(sband->ht_cap.mcs));
if (priv->bss_mode == NL80211_IFTYPE_STATION ||
@@ -57,13 +61,18 @@ mwifiex_fill_cap_info(struct mwifiex_private *priv, u8 radio_type,
(priv->adapter->sec_chan_offset !=
IEEE80211_HT_PARAM_CHA_SEC_NONE)))
/* Set MCS32 for infra mode or ad-hoc mode with 40MHz support */
- SETHT_MCS32(ht_cap->ht_cap.mcs.rx_mask);
+ SETHT_MCS32(ht_cap->mcs.rx_mask);
/* Clear RD responder bit */
ht_ext_cap &= ~IEEE80211_HT_EXT_CAP_RD_RESPONDER;
- ht_cap->ht_cap.cap_info = cpu_to_le16(sband->ht_cap.cap);
- ht_cap->ht_cap.extended_ht_cap_info = cpu_to_le16(ht_ext_cap);
+ ht_cap->cap_info = cpu_to_le16(sband->ht_cap.cap);
+ ht_cap->extended_ht_cap_info = cpu_to_le16(ht_ext_cap);
+
+ if (ISSUPP_BEAMFORMING(priv->adapter->hw_dot_11n_dev_cap))
+ ht_cap->tx_BF_cap_info = cpu_to_le32(MWIFIEX_DEF_11N_TX_BF_CAP);
+
+ return 0;
}
/*
@@ -312,7 +321,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
sizeof(struct ieee_types_header),
le16_to_cpu(ht_cap->header.len));
- mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+ mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
*buffer += sizeof(struct mwifiex_ie_types_htcap);
ret_len += sizeof(struct mwifiex_ie_types_htcap);
@@ -528,16 +537,32 @@ void mwifiex_create_ba_tbl(struct mwifiex_private *priv, u8 *ra, int tid,
int mwifiex_send_addba(struct mwifiex_private *priv, int tid, u8 *peer_mac)
{
struct host_cmd_ds_11n_addba_req add_ba_req;
+ struct mwifiex_sta_node *sta_ptr;
+ u32 tx_win_size = priv->add_ba_param.tx_win_size;
static u8 dialog_tok;
int ret;
dev_dbg(priv->adapter->dev, "cmd: %s: tid %d\n", __func__, tid);
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->adapter->is_hw_11ac_capable &&
+ memcmp(priv->cfg_bssid, peer_mac, ETH_ALEN)) {
+ sta_ptr = mwifiex_get_sta_entry(priv, peer_mac);
+ if (!sta_ptr) {
+ dev_warn(priv->adapter->dev,
+ "BA setup with unknown TDLS peer %pM!\n",
+ peer_mac);
+ return -1;
+ }
+ if (sta_ptr->is_11ac_enabled)
+ tx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_TXWINSIZE;
+ }
+
add_ba_req.block_ack_param_set = cpu_to_le16(
(u16) ((tid << BLOCKACKPARAM_TID_POS) |
- (priv->add_ba_param.
- tx_win_size << BLOCKACKPARAM_WINSIZE_POS) |
- IMMEDIATE_BLOCK_ACK));
+ tx_win_size << BLOCKACKPARAM_WINSIZE_POS |
+ IMMEDIATE_BLOCK_ACK));
add_ba_req.block_ack_tmo = cpu_to_le16((u16)priv->add_ba_param.timeout);
++dialog_tok;
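A quick sanity check of the block_ack_param_set packing above, assuming the usual mwifiex constants from fw.h (BLOCKACKPARAM_TID_POS 2, BLOCKACKPARAM_WINSIZE_POS 6, IMMEDIATE_BLOCK_ACK 0x2), which mirror the 802.11 ADDBA Parameter Set layout (bit 1 BA policy, bits 2-5 TID, bits 6-15 buffer size):

/* Illustrative only: tid = 6, tx_win_size = 64
 *   (6 << 2) | (64 << 6) | 0x2 = 0x18 | 0x1000 | 0x2 = 0x101a
 * i.e. immediate block ack, TID 6, 64-frame transmit window.
 */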
diff --git a/drivers/net/wireless/mwifiex/11n.h b/drivers/net/wireless/mwifiex/11n.h
index 375db01..12bb6ac 100644
--- a/drivers/net/wireless/mwifiex/11n.h
+++ b/drivers/net/wireless/mwifiex/11n.h
@@ -34,8 +34,8 @@ int mwifiex_cmd_11n_cfg(struct mwifiex_private *priv,
int mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
struct mwifiex_bssdescriptor *bss_desc,
u8 **buffer);
-void mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
- struct mwifiex_ie_types_htcap *);
+int mwifiex_fill_cap_info(struct mwifiex_private *, u8 radio_type,
+ struct ieee80211_ht_cap *);
int mwifiex_set_get_11n_htcap_cfg(struct mwifiex_private *priv,
u16 action, int *htcap_cfg);
void mwifiex_11n_delete_tx_ba_stream_tbl_entry(struct mwifiex_private *priv,
@@ -64,14 +64,32 @@ int mwifiex_cmd_amsdu_aggr_ctrl(struct host_cmd_ds_command *cmd,
struct mwifiex_ds_11n_amsdu_aggr_ctrl *aa_ctrl);
void mwifiex_del_tx_ba_stream_tbl_by_ra(struct mwifiex_private *priv, u8 *ra);
-/*
- * This function checks whether AMPDU is allowed or not for a particular TID.
- */
static inline u8
-mwifiex_is_ampdu_allowed(struct mwifiex_private *priv, int tid)
+mwifiex_is_station_ampdu_allowed(struct mwifiex_private *priv,
+ struct mwifiex_ra_list_tbl *ptr, int tid)
{
- return ((priv->aggr_prio_tbl[tid].ampdu_ap != BA_STREAM_NOT_ALLOWED)
- ? true : false);
+ struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ptr->ra);
+
+ if (unlikely(!node))
+ return false;
+
+ return (node->ampdu_sta[tid] != BA_STREAM_NOT_ALLOWED) ? true : false;
+}
+
+/* This function checks whether AMPDU is allowed or not for a particular TID. */
+static inline u8
+mwifiex_is_ampdu_allowed(struct mwifiex_private *priv,
+ struct mwifiex_ra_list_tbl *ptr, int tid)
+{
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
+ } else {
+ if (ptr->tdls_link)
+ return mwifiex_is_station_ampdu_allowed(priv, ptr, tid);
+
+ return (priv->aggr_prio_tbl[tid].ampdu_ap !=
+ BA_STREAM_NOT_ALLOWED) ? true : false;
+ }
}
/*
@@ -165,4 +183,14 @@ static inline int mwifiex_is_sta_11n_enabled(struct mwifiex_private *priv,
return node->is_11n_enabled;
}
+
+static inline u8
+mwifiex_tdls_peer_11n_enabled(struct mwifiex_private *priv, u8 *ra)
+{
+ struct mwifiex_sta_node *node = mwifiex_get_sta_entry(priv, ra);
+ if (node)
+ return node->is_11n_enabled;
+
+ return false;
+}
#endif /* !_MWIFIEX_11N_H_ */
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index ada809f..b361257 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -290,7 +290,11 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
last_seq = node->rx_seq[tid];
}
} else {
- last_seq = priv->rx_seq[tid];
+ node = mwifiex_get_sta_entry(priv, ta);
+ if (node)
+ last_seq = node->rx_seq[tid];
+ else
+ last_seq = priv->rx_seq[tid];
}
if (last_seq != MWIFIEX_DEF_11N_RX_SEQ_NUM &&
@@ -358,10 +362,28 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
*cmd_addba_req)
{
struct host_cmd_ds_11n_addba_rsp *add_ba_rsp = &cmd->params.add_ba_rsp;
+ struct mwifiex_sta_node *sta_ptr;
+ u32 rx_win_size = priv->add_ba_param.rx_win_size;
u8 tid;
int win_size;
uint16_t block_ack_param_set;
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ priv->adapter->is_hw_11ac_capable &&
+ memcmp(priv->cfg_bssid, cmd_addba_req->peer_mac_addr, ETH_ALEN)) {
+ sta_ptr = mwifiex_get_sta_entry(priv,
+ cmd_addba_req->peer_mac_addr);
+ if (!sta_ptr) {
+ dev_warn(priv->adapter->dev,
+ "BA setup with unknown TDLS peer %pM!\n",
+ cmd_addba_req->peer_mac_addr);
+ return -1;
+ }
+ if (sta_ptr->is_11ac_enabled)
+ rx_win_size = MWIFIEX_11AC_STA_AMPDU_DEF_RXWINSIZE;
+ }
+
cmd->command = cpu_to_le16(HostCmd_CMD_11N_ADDBA_RSP);
cmd->size = cpu_to_le16(sizeof(*add_ba_rsp) + S_DS_GEN);
@@ -378,8 +400,7 @@ int mwifiex_cmd_11n_addba_rsp_gen(struct mwifiex_private *priv,
block_ack_param_set &= ~IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK;
/* We donot support AMSDU inside AMPDU, hence reset the bit */
block_ack_param_set &= ~BLOCKACKPARAM_AMSDU_SUPP_MASK;
- block_ack_param_set |= (priv->add_ba_param.rx_win_size <<
- BLOCKACKPARAM_WINSIZE_POS);
+ block_ack_param_set |= rx_win_size << BLOCKACKPARAM_WINSIZE_POS;
add_ba_rsp->block_ack_param_set = cpu_to_le16(block_ack_param_set);
win_size = (le16_to_cpu(add_ba_rsp->block_ack_param_set)
& IEEE80211_ADDBA_PARAM_BUF_SIZE_MASK)
diff --git a/drivers/net/wireless/mwifiex/Makefile b/drivers/net/wireless/mwifiex/Makefile
index a42a506..2aa208f 100644
--- a/drivers/net/wireless/mwifiex/Makefile
+++ b/drivers/net/wireless/mwifiex/Makefile
@@ -41,6 +41,7 @@ mwifiex-y += uap_txrx.o
mwifiex-y += cfg80211.o
mwifiex-y += ethtool.o
mwifiex-y += 11h.o
+mwifiex-y += tdls.o
mwifiex-$(CONFIG_DEBUG_FS) += debugfs.o
obj-$(CONFIG_MWIFIEX) += mwifiex.o
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 8bfc07c..436ba43 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1416,9 +1416,6 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
if (params->chandef.width > NL80211_CHAN_WIDTH_20_NOHT)
config_bands |= BAND_GN;
-
- if (params->chandef.width > NL80211_CHAN_WIDTH_40)
- config_bands |= BAND_GAC;
} else {
bss_cfg->band_cfg = BAND_CONFIG_A;
config_bands = BAND_A;
@@ -1583,8 +1580,9 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
* the function notifies the CFG802.11 subsystem of the new BSS connection.
*/
static int
-mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid,
- u8 *bssid, int mode, struct ieee80211_channel *channel,
+mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len,
+ const u8 *ssid, const u8 *bssid, int mode,
+ struct ieee80211_channel *channel,
struct cfg80211_connect_params *sme, bool privacy)
{
struct cfg80211_ssid req_ssid;
@@ -1881,7 +1879,8 @@ mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
params->privacy);
done:
if (!ret) {
- cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid,
+ params->chandef.chan, GFP_KERNEL);
dev_dbg(priv->adapter->dev,
"info: joined/created adhoc network with bssid"
" %pM successfully\n", priv->cfg_bssid);
@@ -2595,6 +2594,170 @@ static int mwifiex_cfg80211_set_coalesce(struct wiphy *wiphy,
HostCmd_ACT_GEN_SET, 0, &coalesce_cfg);
}
+/* cfg80211 ops handler for tdls_mgmt.
+ * Function prepares TDLS action frame packets and forwards them to FW
+ */
+static int
+mwifiex_cfg80211_tdls_mgmt(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+ int ret;
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
+ return -ENOTSUPP;
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Setup Request to %pM status_code=%d\n", peer,
+ status_code);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Setup Response to %pM status_code=%d\n",
+ peer, status_code);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_SETUP_CONFIRM:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Confirm to %pM status_code=%d\n", peer,
+ status_code);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_TEARDOWN:
+ dev_dbg(priv->adapter->dev, "Send TDLS Tear down to %pM\n",
+ peer);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Discovery Request to %pM\n", peer);
+ ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ dev_dbg(priv->adapter->dev,
+ "Send TDLS Discovery Response to %pM\n", peer);
+ ret = mwifiex_send_tdls_action_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ extra_ies, extra_ies_len);
+ break;
+ default:
+ dev_warn(priv->adapter->dev,
+ "Unknown TDLS mgmt/action frame %pM\n", peer);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+mwifiex_cfg80211_tdls_oper(struct wiphy *wiphy, struct net_device *dev,
+ u8 *peer, enum nl80211_tdls_operation action)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (!(wiphy->flags & WIPHY_FLAG_SUPPORTS_TDLS) ||
+ !(wiphy->flags & WIPHY_FLAG_TDLS_EXTERNAL_SETUP))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if (!(priv->bss_type == MWIFIEX_BSS_TYPE_STA && priv->media_connected))
+ return -ENOTSUPP;
+
+ dev_dbg(priv->adapter->dev,
+ "TDLS peer=%pM, oper=%d\n", peer, action);
+
+ switch (action) {
+ case NL80211_TDLS_ENABLE_LINK:
+ action = MWIFIEX_TDLS_ENABLE_LINK;
+ break;
+ case NL80211_TDLS_DISABLE_LINK:
+ action = MWIFIEX_TDLS_DISABLE_LINK;
+ break;
+ case NL80211_TDLS_TEARDOWN:
+ /* shouldn't happen! */
+ dev_warn(priv->adapter->dev,
+ "tdls_oper: teardown from driver not supported\n");
+ return -EINVAL;
+ case NL80211_TDLS_SETUP:
+ /* shouldn't happen! */
+ dev_warn(priv->adapter->dev,
+ "tdls_oper: setup from driver not supported\n");
+ return -EINVAL;
+ case NL80211_TDLS_DISCOVERY_REQ:
+ /* shouldn't happen! */
+ dev_warn(priv->adapter->dev,
+ "tdls_oper: discovery from driver not supported\n");
+ return -EINVAL;
+ default:
+ dev_err(priv->adapter->dev,
+ "tdls_oper: operation not supported\n");
+ return -ENOTSUPP;
+ }
+
+ return mwifiex_tdls_oper(priv, peer, action);
+}
+
+static int
+mwifiex_cfg80211_add_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 *mac, struct station_parameters *params)
+{
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
+ return -ENOTSUPP;
+
+ return mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CREATE_LINK);
+}
+
+static int
+mwifiex_cfg80211_change_station(struct wiphy *wiphy,
+ struct net_device *dev,
+ u8 *mac, struct station_parameters *params)
+{
+ int ret;
+ struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev);
+
+ /* we support change_station handler only for TDLS peers */
+ if (!(params->sta_flags_set & BIT(NL80211_STA_FLAG_TDLS_PEER)))
+ return -ENOTSUPP;
+
+ /* make sure we are in station mode and connected */
+ if ((priv->bss_type != MWIFIEX_BSS_TYPE_STA) || !priv->media_connected)
+ return -ENOTSUPP;
+
+ priv->sta_params = params;
+
+ ret = mwifiex_tdls_oper(priv, mac, MWIFIEX_TDLS_CONFIG_LINK);
+ priv->sta_params = NULL;
+
+ return ret;
+}
+
/* station cfg80211 operations */
static struct cfg80211_ops mwifiex_cfg80211_ops = {
.add_virtual_intf = mwifiex_add_virtual_intf,
@@ -2630,6 +2793,10 @@ static struct cfg80211_ops mwifiex_cfg80211_ops = {
.set_wakeup = mwifiex_cfg80211_set_wakeup,
#endif
.set_coalesce = mwifiex_cfg80211_set_coalesce,
+ .tdls_mgmt = mwifiex_cfg80211_tdls_mgmt,
+ .tdls_oper = mwifiex_cfg80211_tdls_oper,
+ .add_station = mwifiex_cfg80211_add_station,
+ .change_station = mwifiex_cfg80211_change_station,
};
#ifdef CONFIG_PM
@@ -2715,6 +2882,11 @@ int mwifiex_register_cfg80211(struct mwifiex_adapter *adapter)
WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD |
WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
+
+ if (ISSUPP_TDLS_ENABLED(adapter->fw_cap_info))
+ wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS |
+ WIPHY_FLAG_TDLS_EXTERNAL_SETUP;
+
wiphy->regulatory_flags |=
REGULATORY_CUSTOM_REG |
REGULATORY_STRICT_REG;
diff --git a/drivers/net/wireless/mwifiex/cfp.c b/drivers/net/wireless/mwifiex/cfp.c
index 9eefacb..2c3226b 100644
--- a/drivers/net/wireless/mwifiex/cfp.c
+++ b/drivers/net/wireless/mwifiex/cfp.c
@@ -71,6 +71,95 @@ u16 region_code_index[MWIFIEX_MAX_REGION_CODE] = { 0x10, 0x20, 0x30,
static u8 supported_rates_n[N_SUPPORTED_RATES] = { 0x02, 0x04, 0 };
+/* For every mcs_rate line, the first 8 bytes are for stream 1x1,
+ * and all 16 bytes are for stream 2x2.
+ */
+static const u16 mcs_rate[4][16] = {
+ /* LGI 40M */
+ { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
+ 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
+
+ /* SGI 40M */
+ { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
+ 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
+
+ /* LGI 20M */
+ { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
+ 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
+
+ /* SGI 20M */
+ { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
+ 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
+};
+
+/* AC rates */
+static const u16 ac_mcs_rate_nss1[8][10] = {
+ /* LG 160M */
+ { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
+ 0x492, 0x57C, 0x618 },
+
+ /* SG 160M */
+ { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
+ 0x514, 0x618, 0x6C6 },
+
+ /* LG 80M */
+ { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F,
+ 0x249, 0x2BE, 0x30C },
+
+ /* SG 80M */
+ { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249,
+ 0x28A, 0x30C, 0x363 },
+
+ /* LG 40M */
+ { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3,
+ 0x10E, 0x144, 0x168 },
+
+ /* SG 40M */
+ { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E,
+ 0x12C, 0x168, 0x190 },
+
+ /* LG 20M */
+ { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 },
+
+ /* SG 20M */
+ { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 },
+};
+
+/* NSS2 note: the value in the table is 2 multiplier of the actual rate */
+static const u16 ac_mcs_rate_nss2[8][10] = {
+ /* LG 160M */
+ { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A,
+ 0x924, 0xAF8, 0xC30 },
+
+ /* SG 160M */
+ { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924,
+ 0xA28, 0xC30, 0xD8B },
+
+ /* LG 80M */
+ { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
+ 0x492, 0x57C, 0x618 },
+
+ /* SG 80M */
+ { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
+ 0x514, 0x618, 0x6C6 },
+
+ /* LG 40M */
+ { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6,
+ 0x21C, 0x288, 0x2D0 },
+
+ /* SG 40M */
+ { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C,
+ 0x258, 0x2D0, 0x320 },
+
+ /* LG 20M */
+ { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104,
+ 0x138, 0x00 },
+
+ /* SG 20M */
+ { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121,
+ 0x15B, 0x00 },
+};
+
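Aside (not part of the diff): the entries read as data rates in units of 0.5 Mbps, so the lookups below can be cross-checked against the 802.11n/ac rate tables, for example:

/* mcs_rate[2][7]  = 0x82  = 130 -> 65 Mbps  (HT MCS 7, 20 MHz, long GI)
 * mcs_rate[1][15] = 0x258 = 600 -> 300 Mbps (HT MCS 15, 40 MHz, short GI)
 * ac_mcs_rate_nss1[3][9] = 0x363 = 867 -> ~433 Mbps (VHT MCS 9, 1SS, 80 MHz, SGI)
 */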
struct region_code_mapping {
u8 code;
u8 region[IEEE80211_COUNTRY_STRING_LEN];
@@ -109,95 +198,6 @@ u8 *mwifiex_11d_code_2_region(u8 code)
u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
u8 index, u8 ht_info)
{
- /*
- * For every mcs_rate line, the first 8 bytes are for stream 1x1,
- * and all 16 bytes are for stream 2x2.
- */
- u16 mcs_rate[4][16] = {
- /* LGI 40M */
- { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
- 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
-
- /* SGI 40M */
- { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
- 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
-
- /* LGI 20M */
- { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
- 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
-
- /* SGI 20M */
- { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
- 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
- };
- /* AC rates */
- u16 ac_mcs_rate_nss1[8][10] = {
- /* LG 160M */
- { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
- 0x492, 0x57C, 0x618 },
-
- /* SG 160M */
- { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
- 0x514, 0x618, 0x6C6 },
-
- /* LG 80M */
- { 0x3B, 0x75, 0xB0, 0xEA, 0x15F, 0x1D4, 0x20F,
- 0x249, 0x2BE, 0x30C },
-
- /* SG 80M */
- { 0x41, 0x82, 0xC3, 0x104, 0x186, 0x208, 0x249,
- 0x28A, 0x30C, 0x363 },
-
- /* LG 40M */
- { 0x1B, 0x36, 0x51, 0x6C, 0xA2, 0xD8, 0xF3,
- 0x10E, 0x144, 0x168 },
-
- /* SG 40M */
- { 0x1E, 0x3C, 0x5A, 0x78, 0xB4, 0xF0, 0x10E,
- 0x12C, 0x168, 0x190 },
-
- /* LG 20M */
- { 0xD, 0x1A, 0x27, 0x34, 0x4E, 0x68, 0x75, 0x82, 0x9C, 0x00 },
-
- /* SG 20M */
- { 0xF, 0x1D, 0x2C, 0x3A, 0x57, 0x74, 0x82, 0x91, 0xAE, 0x00 },
- };
- /* NSS2 note: the value in the table is 2 multiplier of the actual
- * rate
- */
- u16 ac_mcs_rate_nss2[8][10] = {
- /* LG 160M */
- { 0xEA, 0x1D4, 0x2BE, 0x3A8, 0x57C, 0x750, 0x83A,
- 0x924, 0xAF8, 0xC30 },
-
- /* SG 160M */
- { 0x104, 0x208, 0x30C, 0x410, 0x618, 0x820, 0x924,
- 0xA28, 0xC30, 0xD8B },
-
- /* LG 80M */
- { 0x75, 0xEA, 0x15F, 0x1D4, 0x2BE, 0x3A8, 0x41D,
- 0x492, 0x57C, 0x618 },
-
- /* SG 80M */
- { 0x82, 0x104, 0x186, 0x208, 0x30C, 0x410, 0x492,
- 0x514, 0x618, 0x6C6 },
-
- /* LG 40M */
- { 0x36, 0x6C, 0xA2, 0xD8, 0x144, 0x1B0, 0x1E6,
- 0x21C, 0x288, 0x2D0 },
-
- /* SG 40M */
- { 0x3C, 0x78, 0xB4, 0xF0, 0x168, 0x1E0, 0x21C,
- 0x258, 0x2D0, 0x320 },
-
- /* LG 20M */
- { 0x1A, 0x34, 0x4A, 0x68, 0x9C, 0xD0, 0xEA, 0x104,
- 0x138, 0x00 },
-
- /* SG 20M */
- { 0x1D, 0x3A, 0x57, 0x74, 0xAE, 0xE6, 0x104, 0x121,
- 0x15B, 0x00 },
- };
u32 rate = 0;
u8 mcs_index = 0;
u8 bw = 0;
@@ -252,26 +252,6 @@ u32 mwifiex_index_to_acs_data_rate(struct mwifiex_private *priv,
u32 mwifiex_index_to_data_rate(struct mwifiex_private *priv,
u8 index, u8 ht_info)
{
- /* For every mcs_rate line, the first 8 bytes are for stream 1x1,
- * and all 16 bytes are for stream 2x2.
- */
- u16 mcs_rate[4][16] = {
- /* LGI 40M */
- { 0x1b, 0x36, 0x51, 0x6c, 0xa2, 0xd8, 0xf3, 0x10e,
- 0x36, 0x6c, 0xa2, 0xd8, 0x144, 0x1b0, 0x1e6, 0x21c },
-
- /* SGI 40M */
- { 0x1e, 0x3c, 0x5a, 0x78, 0xb4, 0xf0, 0x10e, 0x12c,
- 0x3c, 0x78, 0xb4, 0xf0, 0x168, 0x1e0, 0x21c, 0x258 },
-
- /* LGI 20M */
- { 0x0d, 0x1a, 0x27, 0x34, 0x4e, 0x68, 0x75, 0x82,
- 0x1a, 0x34, 0x4e, 0x68, 0x9c, 0xd0, 0xea, 0x104 },
-
- /* SGI 20M */
- { 0x0e, 0x1c, 0x2b, 0x39, 0x56, 0x73, 0x82, 0x90,
- 0x1c, 0x39, 0x56, 0x73, 0xad, 0xe7, 0x104, 0x120 }
- };
u32 mcs_num_supp =
(priv->adapter->hw_dev_mcs_support == HT_STREAM_2X2) ? 16 : 8;
u32 rate;
@@ -458,7 +438,6 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
break;
case BAND_G:
case BAND_G | BAND_GN:
- case BAND_G | BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_g\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_g,
@@ -469,10 +448,7 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
case BAND_A | BAND_B:
case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN:
case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN | BAND_AAC:
- case BAND_A | BAND_B | BAND_G | BAND_GN | BAND_AN |
- BAND_AAC | BAND_GAC:
case BAND_B | BAND_G | BAND_GN:
- case BAND_B | BAND_G | BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_bg\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_bg,
@@ -496,7 +472,6 @@ u32 mwifiex_get_supported_rates(struct mwifiex_private *priv, u8 *rates)
sizeof(supported_rates_a));
break;
case BAND_GN:
- case BAND_GN | BAND_GAC:
dev_dbg(adapter->dev, "info: infra band=%d "
"supported_rates_n\n", adapter->config_bands);
k = mwifiex_copy_rates(rates, k, supported_rates_n,
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 1ddc8b2e..2154460 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -595,7 +595,8 @@ int mwifiex_send_cmd_async(struct mwifiex_private *priv, uint16_t cmd_no,
}
/* Send command */
- if (cmd_no == HostCmd_CMD_802_11_SCAN) {
+ if (cmd_no == HostCmd_CMD_802_11_SCAN ||
+ cmd_no == HostCmd_CMD_802_11_SCAN_EXT) {
mwifiex_queue_scan_cmd(priv, cmd_node);
} else {
mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
@@ -1454,7 +1455,10 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
{
struct host_cmd_ds_get_hw_spec *hw_spec = &resp->params.hw_spec;
struct mwifiex_adapter *adapter = priv->adapter;
- int i;
+ struct mwifiex_ie_types_header *tlv;
+ struct hw_spec_fw_api_rev *api_rev;
+ u16 resp_size, api_id;
+ int i, left_len, parsed_len = 0;
adapter->fw_cap_info = le32_to_cpu(hw_spec->fw_cap_info);
@@ -1498,8 +1502,10 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
/* Copy 11AC cap */
adapter->hw_dot_11ac_dev_cap =
le32_to_cpu(hw_spec->dot_11ac_dev_cap);
- adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap;
- adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap;
+ adapter->usr_dot_11ac_dev_cap_bg = adapter->hw_dot_11ac_dev_cap
+ & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
+ adapter->usr_dot_11ac_dev_cap_a = adapter->hw_dot_11ac_dev_cap
+ & ~MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK;
/* Copy 11AC mcs */
adapter->hw_dot_11ac_mcs_support =
@@ -1510,6 +1516,46 @@ int mwifiex_ret_get_hw_spec(struct mwifiex_private *priv,
adapter->is_hw_11ac_capable = false;
}
+ resp_size = le16_to_cpu(resp->size) - S_DS_GEN;
+ if (resp_size > sizeof(struct host_cmd_ds_get_hw_spec)) {
+ /* we have variable HW SPEC information */
+ left_len = resp_size - sizeof(struct host_cmd_ds_get_hw_spec);
+ while (left_len > sizeof(struct mwifiex_ie_types_header)) {
+ tlv = (void *)&hw_spec->tlvs + parsed_len;
+ switch (le16_to_cpu(tlv->type)) {
+ case TLV_TYPE_FW_API_REV:
+ api_rev = (struct hw_spec_fw_api_rev *)tlv;
+ api_id = le16_to_cpu(api_rev->api_id);
+ switch (api_id) {
+ case KEY_API_VER_ID:
+ adapter->fw_key_api_major_ver =
+ api_rev->major_ver;
+ adapter->fw_key_api_minor_ver =
+ api_rev->minor_ver;
+ dev_dbg(adapter->dev,
+ "fw_key_api v%d.%d\n",
+ adapter->fw_key_api_major_ver,
+ adapter->fw_key_api_minor_ver);
+ break;
+ default:
+ dev_warn(adapter->dev,
+ "Unknown FW api_id: %d\n",
+ api_id);
+ break;
+ }
+ break;
+ default:
+ dev_warn(adapter->dev,
+ "Unknown GET_HW_SPEC TLV type: %#x\n",
+ le16_to_cpu(tlv->type));
+ break;
+ }
+ parsed_len += le16_to_cpu(tlv->len) +
+ sizeof(struct mwifiex_ie_types_header);
+ left_len -= parsed_len;
+ }
+ }
+
dev_dbg(adapter->dev, "info: GET_HW_SPEC: fw_release_number- %#x\n",
adapter->fw_release_number);
dev_dbg(adapter->dev, "info: GET_HW_SPEC: permanent addr: %pM\n",
diff --git a/drivers/net/wireless/mwifiex/decl.h b/drivers/net/wireless/mwifiex/decl.h
index 3a21bd0..e7b3e16 100644
--- a/drivers/net/wireless/mwifiex/decl.h
+++ b/drivers/net/wireless/mwifiex/decl.h
@@ -75,10 +75,16 @@
#define MWIFIEX_BUF_FLAG_REQUEUED_PKT BIT(0)
#define MWIFIEX_BUF_FLAG_BRIDGED_PKT BIT(1)
+#define MWIFIEX_BUF_FLAG_TDLS_PKT BIT(2)
#define MWIFIEX_BRIDGED_PKTS_THR_HIGH 1024
#define MWIFIEX_BRIDGED_PKTS_THR_LOW 128
+#define MWIFIEX_TDLS_DISABLE_LINK 0x00
+#define MWIFIEX_TDLS_ENABLE_LINK 0x01
+#define MWIFIEX_TDLS_CREATE_LINK 0x02
+#define MWIFIEX_TDLS_CONFIG_LINK 0x03
+
enum mwifiex_bss_type {
MWIFIEX_BSS_TYPE_STA = 0,
MWIFIEX_BSS_TYPE_UAP = 1,
@@ -92,6 +98,23 @@ enum mwifiex_bss_role {
MWIFIEX_BSS_ROLE_ANY = 0xff,
};
+enum mwifiex_tdls_status {
+ TDLS_NOT_SETUP = 0,
+ TDLS_SETUP_INPROGRESS,
+ TDLS_SETUP_COMPLETE,
+ TDLS_SETUP_FAILURE,
+ TDLS_LINK_TEARDOWN,
+};
+
+enum mwifiex_tdls_error_code {
+ TDLS_ERR_NO_ERROR = 0,
+ TDLS_ERR_INTERNAL_ERROR,
+ TDLS_ERR_MAX_LINKS_EST,
+ TDLS_ERR_LINK_EXISTS,
+ TDLS_ERR_LINK_NONEXISTENT,
+ TDLS_ERR_PEER_STA_UNREACHABLE = 25,
+};
+
#define BSS_ROLE_BIT_MASK BIT(0)
#define GET_BSS_ROLE(priv) ((priv)->bss_role & BSS_ROLE_BIT_MASK)
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 5fa932d..aa8abef 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -50,21 +50,23 @@ struct tx_packet_hdr {
#define HOSTCMD_SUPPORTED_RATES 14
#define N_SUPPORTED_RATES 3
#define ALL_802_11_BANDS (BAND_A | BAND_B | BAND_G | BAND_GN | \
- BAND_AN | BAND_GAC | BAND_AAC)
+ BAND_AN | BAND_AAC)
#define FW_MULTI_BANDS_SUPPORT (BIT(8) | BIT(9) | BIT(10) | BIT(11) | \
- BIT(12) | BIT(13))
+ BIT(13))
#define IS_SUPPORT_MULTI_BANDS(adapter) \
(adapter->fw_cap_info & FW_MULTI_BANDS_SUPPORT)
-/* shift bit 12 and bit 13 in fw_cap_info from the firmware to bit 13 and 14
- * for 11ac so that bit 11 is for GN, bit 12 for AN, bit 13 for GAC, and bit
- * bit 14 for AAC, in order to be compatible with the band capability
- * defined in the driver after right shift of 8 bits.
+/* bit 13: 11ac BAND_AAC
+ * bit 12: reserved for lab testing, will be reused for BAND_AN
+ * bit 11: 11n BAND_GN
+ * bit 10: 11a BAND_A
+ * bit 9: 11g BAND_G
+ * bit 8: 11b BAND_B
+ * Map these bits to band capability by right shifting 8 bits.
*/
#define GET_FW_DEFAULT_BANDS(adapter) \
- (((((adapter->fw_cap_info & 0x3000) << 1) | \
- (adapter->fw_cap_info & ~0xF000)) >> 8) & \
+ (((adapter->fw_cap_info & 0x2f00) >> 8) & \
ALL_802_11_BANDS)
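A worked example of the new band mapping (illustrative only, using the BAND_* values from ioctl.h later in this diff):

/* fw_cap_info = 0x2f00 (bits 8-11 and 13 set):
 *   (0x2f00 & 0x2f00) >> 8 = 0x2f
 *   0x2f & ALL_802_11_BANDS = BAND_B | BAND_G | BAND_A | BAND_GN | BAND_AAC
 * Bit 12 is masked out, so BAND_AN is never taken from the firmware default.
 */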
#define HostCmd_WEP_KEY_INDEX_MASK 0x3fff
@@ -77,12 +79,21 @@ enum KEY_TYPE_ID {
KEY_TYPE_ID_WAPI,
KEY_TYPE_ID_AES_CMAC,
};
+
+#define WPA_PN_SIZE 8
+#define KEY_PARAMS_FIXED_LEN 10
+#define KEY_INDEX_MASK 0xf
+#define FW_KEY_API_VER_MAJOR_V2 2
+
#define KEY_MCAST BIT(0)
#define KEY_UNICAST BIT(1)
#define KEY_ENABLED BIT(2)
+#define KEY_DEFAULT BIT(3)
+#define KEY_TX_KEY BIT(4)
+#define KEY_RX_KEY BIT(5)
#define KEY_IGTK BIT(10)
-#define WAPI_KEY_LEN 50
+#define WAPI_KEY_LEN (WLAN_KEY_LEN_SMS4 + PN_LEN + 2)
#define MAX_POLL_TRIES 100
#define MAX_FIRMWARE_POLL_TRIES 100
@@ -130,6 +141,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_RSSI_HIGH (PROPRIETARY_TLV_BASE_ID + 22)
#define TLV_TYPE_AUTH_TYPE (PROPRIETARY_TLV_BASE_ID + 31)
#define TLV_TYPE_STA_MAC_ADDR (PROPRIETARY_TLV_BASE_ID + 32)
+#define TLV_TYPE_BSSID (PROPRIETARY_TLV_BASE_ID + 35)
#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
@@ -144,6 +156,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_RATE_DROP_CONTROL (PROPRIETARY_TLV_BASE_ID + 82)
#define TLV_TYPE_RATE_SCOPE (PROPRIETARY_TLV_BASE_ID + 83)
#define TLV_TYPE_POWER_GROUP (PROPRIETARY_TLV_BASE_ID + 84)
+#define TLV_TYPE_BSS_SCAN_RSP (PROPRIETARY_TLV_BASE_ID + 86)
+#define TLV_TYPE_BSS_SCAN_INFO (PROPRIETARY_TLV_BASE_ID + 87)
#define TLV_TYPE_UAP_RETRY_LIMIT (PROPRIETARY_TLV_BASE_ID + 93)
#define TLV_TYPE_WAPI_IE (PROPRIETARY_TLV_BASE_ID + 94)
#define TLV_TYPE_UAP_MGMT_FRAME (PROPRIETARY_TLV_BASE_ID + 104)
@@ -154,6 +168,8 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define TLV_TYPE_PWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 145)
#define TLV_TYPE_GWK_CIPHER (PROPRIETARY_TLV_BASE_ID + 146)
#define TLV_TYPE_COALESCE_RULE (PROPRIETARY_TLV_BASE_ID + 154)
+#define TLV_TYPE_KEY_PARAM_V2 (PROPRIETARY_TLV_BASE_ID + 156)
+#define TLV_TYPE_FW_API_REV (PROPRIETARY_TLV_BASE_ID + 199)
#define MWIFIEX_TX_DATA_BUF_SIZE_2K 2048
@@ -176,11 +192,14 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define MWIFIEX_TX_DATA_BUF_SIZE_8K 8192
#define ISSUPP_11NENABLED(FwCapInfo) (FwCapInfo & BIT(11))
+#define ISSUPP_TDLS_ENABLED(FwCapInfo) (FwCapInfo & BIT(14))
#define MWIFIEX_DEF_HT_CAP (IEEE80211_HT_CAP_DSSSCCK40 | \
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | \
IEEE80211_HT_CAP_SM_PS)
+#define MWIFIEX_DEF_11N_TX_BF_CAP 0x09E1E008
+
#define MWIFIEX_DEF_AMPDU IEEE80211_HT_AMPDU_PARM_FACTOR
/* dev_cap bitmap
@@ -204,6 +223,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define ISSUPP_GREENFIELD(Dot11nDevCap) (Dot11nDevCap & BIT(29))
#define ISENABLED_40MHZ_INTOLERANT(Dot11nDevCap) (Dot11nDevCap & BIT(8))
#define ISSUPP_RXLDPC(Dot11nDevCap) (Dot11nDevCap & BIT(22))
+#define ISSUPP_BEAMFORMING(Dot11nDevCap) (Dot11nDevCap & BIT(30))
/* httxcfg bitmap
* 0 reserved
@@ -226,17 +246,24 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
/* HW_SPEC fw_cap_info */
-#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & (BIT(12)|BIT(13)))
+#define ISSUPP_11ACENABLED(fw_cap_info) (fw_cap_info & BIT(13))
#define GET_VHTCAP_CHWDSET(vht_cap_info) ((vht_cap_info >> 2) & 0x3)
#define GET_VHTNSSMCS(mcs_mapset, nss) ((mcs_mapset >> (2 * (nss - 1))) & 0x3)
#define SET_VHTNSSMCS(mcs_mapset, nss, value) (mcs_mapset |= (value & 0x3) << \
(2 * (nss - 1)))
-#define NO_NSS_SUPPORT 0x3
-
#define GET_DEVTXMCSMAP(dev_mcs_map) (dev_mcs_map >> 16)
#define GET_DEVRXMCSMAP(dev_mcs_map) (dev_mcs_map & 0xFFFF)
+/* Clear SU beamformer, MU beamformer, MU beamformee and
+ * sounding dimensions bits
+ */
+#define MWIFIEX_DEF_11AC_CAP_BF_RESET_MASK \
+ (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | \
+ IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE | \
+ IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | \
+ IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK)
+
#define MOD_CLASS_HR_DSSS 0x03
#define MOD_CLASS_OFDM 0x07
#define MOD_CLASS_HT 0x08
@@ -295,10 +322,12 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
#define HostCmd_CMD_CAU_REG_ACCESS 0x00ed
#define HostCmd_CMD_SET_BSS_MODE 0x00f7
#define HostCmd_CMD_PCIE_DESC_DETAILS 0x00fa
+#define HostCmd_CMD_802_11_SCAN_EXT 0x0107
#define HostCmd_CMD_COALESCE_CFG 0x010a
#define HostCmd_CMD_MGMT_FRAME_REG 0x010c
#define HostCmd_CMD_REMAIN_ON_CHAN 0x010d
#define HostCmd_CMD_11AC_CFG 0x0112
+#define HostCmd_CMD_TDLS_OPER 0x0122
#define PROTOCOL_NO_SECURITY 0x01
#define PROTOCOL_STATIC_WEP 0x02
@@ -440,6 +469,7 @@ enum P2P_MODES {
#define EVENT_UAP_MIC_COUNTERMEASURES 0x0000004c
#define EVENT_HOSTWAKE_STAIE 0x0000004d
#define EVENT_CHANNEL_SWITCH_ANN 0x00000050
+#define EVENT_EXT_SCAN_REPORT 0x00000058
#define EVENT_REMAIN_ON_CHAN_EXPIRED 0x0000005f
#define EVENT_ID_MASK 0xffff
@@ -468,6 +498,10 @@ enum P2P_MODES {
#define MWIFIEX_CRITERIA_UNICAST BIT(1)
#define MWIFIEX_CRITERIA_MULTICAST BIT(3)
+#define ACT_TDLS_DELETE 0x00
+#define ACT_TDLS_CREATE 0x01
+#define ACT_TDLS_CONFIG 0x02
+
struct mwifiex_ie_types_header {
__le16 type;
__le16 len;
@@ -480,6 +514,7 @@ struct mwifiex_ie_types_data {
#define MWIFIEX_TxPD_POWER_MGMT_NULL_PACKET 0x01
#define MWIFIEX_TxPD_POWER_MGMT_LAST_PACKET 0x08
+#define MWIFIEX_TXPD_FLAGS_TDLS_PACKET 0x10
struct txpd {
u8 bss_type;
@@ -676,6 +711,56 @@ struct mwifiex_cmac_param {
u8 key[WLAN_KEY_LEN_AES_CMAC];
} __packed;
+struct mwifiex_wep_param {
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_WEP104];
+} __packed;
+
+struct mwifiex_tkip_param {
+ u8 pn[WPA_PN_SIZE];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_TKIP];
+} __packed;
+
+struct mwifiex_aes_param {
+ u8 pn[WPA_PN_SIZE];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_CCMP];
+} __packed;
+
+struct mwifiex_wapi_param {
+ u8 pn[PN_LEN];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_SMS4];
+} __packed;
+
+struct mwifiex_cmac_aes_param {
+ u8 ipn[IGTK_PN_LEN];
+ __le16 key_len;
+ u8 key[WLAN_KEY_LEN_AES_CMAC];
+} __packed;
+
+struct mwifiex_ie_type_key_param_set_v2 {
+ __le16 type;
+ __le16 len;
+ u8 mac_addr[ETH_ALEN];
+ u8 key_idx;
+ u8 key_type;
+ __le16 key_info;
+ union {
+ struct mwifiex_wep_param wep;
+ struct mwifiex_tkip_param tkip;
+ struct mwifiex_aes_param aes;
+ struct mwifiex_wapi_param wapi;
+ struct mwifiex_cmac_aes_param cmac_aes;
+ } key_params;
+} __packed;
+
+struct host_cmd_ds_802_11_key_material_v2 {
+ __le16 action;
+ struct mwifiex_ie_type_key_param_set_v2 key_param_set;
+} __packed;
+
struct host_cmd_ds_802_11_key_material {
__le16 action;
struct mwifiex_ie_type_key_param_set key_param_set;
@@ -727,6 +812,17 @@ struct host_cmd_ds_802_11_ps_mode_enh {
} params;
} __packed;
+enum FW_API_VER_ID {
+ KEY_API_VER_ID = 1,
+};
+
+struct hw_spec_fw_api_rev {
+ struct mwifiex_ie_types_header header;
+ __le16 api_id;
+ u8 major_ver;
+ u8 minor_ver;
+} __packed;
+
struct host_cmd_ds_get_hw_spec {
__le16 hw_if_version;
__le16 version;
@@ -748,6 +844,7 @@ struct host_cmd_ds_get_hw_spec {
__le32 reserved_6;
__le32 dot_11ac_dev_cap;
__le32 dot_11ac_mcs_support;
+ u8 tlvs[0];
} __packed;
struct host_cmd_ds_802_11_rssi_info {
@@ -1047,14 +1144,28 @@ struct host_cmd_ds_rf_ant_siso {
__le16 ant_mode;
};
-struct mwifiex_bcn_param {
- u8 bssid[ETH_ALEN];
- u8 rssi;
+struct host_cmd_ds_tdls_oper {
+ __le16 tdls_action;
+ __le16 reason;
+ u8 peer_mac[ETH_ALEN];
+} __packed;
+
+struct mwifiex_fixed_bcn_param {
__le64 timestamp;
__le16 beacon_period;
__le16 cap_info_bitmap;
} __packed;
+struct mwifiex_event_scan_result {
+ __le16 event_id;
+ u8 bss_index;
+ u8 bss_type;
+ u8 more_event;
+ u8 reserved[3];
+ __le16 buf_size;
+ u8 num_of_set;
+} __packed;
+
#define MWIFIEX_USER_SCAN_CHAN_MAX 50
#define MWIFIEX_MAX_SSID_LIST_LENGTH 10
@@ -1124,6 +1235,28 @@ struct host_cmd_ds_802_11_scan_rsp {
u8 bss_desc_and_tlv_buffer[1];
} __packed;
+struct host_cmd_ds_802_11_scan_ext {
+ u32 reserved;
+ u8 tlv_buffer[1];
+} __packed;
+
+struct mwifiex_ie_types_bss_scan_rsp {
+ struct mwifiex_ie_types_header header;
+ u8 bssid[ETH_ALEN];
+ u8 frame_body[1];
+} __packed;
+
+struct mwifiex_ie_types_bss_scan_info {
+ struct mwifiex_ie_types_header header;
+ __le16 rssi;
+ __le16 anpi;
+ u8 cca_busy_fraction;
+ u8 radio_type;
+ u8 channel;
+ u8 reserved;
+ __le64 tsf;
+} __packed;
+
struct host_cmd_ds_802_11_bg_scan_query {
u8 flush;
} __packed;
@@ -1296,6 +1429,11 @@ struct mwifiex_ie_types_vhtcap {
struct ieee80211_vht_cap vht_cap;
} __packed;
+struct mwifiex_ie_types_aid {
+ struct mwifiex_ie_types_header header;
+ __le16 aid;
+} __packed;
+
struct mwifiex_ie_types_oper_mode_ntf {
struct mwifiex_ie_types_header header;
u8 oper_mode;
@@ -1331,6 +1469,11 @@ struct mwifiex_ie_types_extcap {
u8 ext_capab[0];
} __packed;
+struct mwifiex_ie_types_qos_info {
+ struct mwifiex_ie_types_header header;
+ u8 qos_info;
+} __packed;
+
struct host_cmd_ds_mac_reg_access {
__le16 action;
__le16 offset;
@@ -1441,6 +1584,11 @@ struct host_cmd_tlv_rates {
u8 rates[0];
} __packed;
+struct mwifiex_ie_types_bssid_list {
+ struct mwifiex_ie_types_header header;
+ u8 bssid[ETH_ALEN];
+} __packed;
+
struct host_cmd_tlv_bcast_ssid {
struct mwifiex_ie_types_header header;
u8 bcast_ctl;
@@ -1634,6 +1782,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_802_11_ps_mode_enh psmode_enh;
struct host_cmd_ds_802_11_hs_cfg_enh opt_hs_cfg;
struct host_cmd_ds_802_11_scan scan;
+ struct host_cmd_ds_802_11_scan_ext ext_scan;
struct host_cmd_ds_802_11_scan_rsp scan_resp;
struct host_cmd_ds_802_11_bg_scan_query bg_scan_query;
struct host_cmd_ds_802_11_bg_scan_query_rsp bg_scan_query_resp;
@@ -1653,6 +1802,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_11n_cfg htcfg;
struct host_cmd_ds_wmm_get_status get_wmm_status;
struct host_cmd_ds_802_11_key_material key_material;
+ struct host_cmd_ds_802_11_key_material_v2 key_material_v2;
struct host_cmd_ds_version_ext verext;
struct host_cmd_ds_mgmt_frame_reg reg_mask;
struct host_cmd_ds_remain_on_chan roc_cfg;
@@ -1671,6 +1821,7 @@ struct host_cmd_ds_command {
struct host_cmd_ds_sta_deauth sta_deauth;
struct host_cmd_11ac_vht_cfg vht_cfg;
struct host_cmd_ds_coalesce_cfg coalesce_cfg;
+ struct host_cmd_ds_tdls_oper tdls_oper;
} params;
} __packed;
diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c
index 1d0a817..a4cd2cb 100644
--- a/drivers/net/wireless/mwifiex/init.c
+++ b/drivers/net/wireless/mwifiex/init.c
@@ -137,6 +137,7 @@ int mwifiex_init_priv(struct mwifiex_private *priv)
priv->csa_expire_time = 0;
priv->del_list_idx = 0;
priv->hs2_enabled = false;
+ memcpy(priv->tos_to_tid_inv, tos_to_tid_inv, MAX_NUM_TID);
return mwifiex_add_bss_prio_tbl(priv);
}
@@ -281,6 +282,9 @@ static void mwifiex_init_adapter(struct mwifiex_adapter *adapter)
adapter->arp_filter_size = 0;
adapter->max_mgmt_ie_index = MAX_MGMT_IE_INDEX;
adapter->empty_tx_q_cnt = 0;
+ adapter->ext_scan = true;
+ adapter->fw_key_api_major_ver = 0;
+ adapter->fw_key_api_minor_ver = 0;
}
/*
@@ -450,6 +454,7 @@ int mwifiex_init_lock_list(struct mwifiex_adapter *adapter)
INIT_LIST_HEAD(&priv->tx_ba_stream_tbl_ptr);
INIT_LIST_HEAD(&priv->rx_reorder_tbl_ptr);
INIT_LIST_HEAD(&priv->sta_list);
+ skb_queue_head_init(&priv->tdls_txq);
spin_lock_init(&priv->tx_ba_stream_tbl_lock);
spin_lock_init(&priv->rx_reorder_tbl_lock);
diff --git a/drivers/net/wireless/mwifiex/ioctl.h b/drivers/net/wireless/mwifiex/ioctl.h
index 00a95f4..5974642 100644
--- a/drivers/net/wireless/mwifiex/ioctl.h
+++ b/drivers/net/wireless/mwifiex/ioctl.h
@@ -60,8 +60,7 @@ enum {
BAND_A = 4,
BAND_GN = 8,
BAND_AN = 16,
- BAND_GAC = 32,
- BAND_AAC = 64,
+ BAND_AAC = 32,
};
#define MWIFIEX_WPA_PASSHPHRASE_LEN 64
@@ -86,6 +85,10 @@ struct wep_key {
#define BAND_CONFIG_A 0x01
#define MWIFIEX_SUPPORTED_RATES 14
#define MWIFIEX_SUPPORTED_RATES_EXT 32
+#define MWIFIEX_TDLS_SUPPORTED_RATES 8
+#define MWIFIEX_TDLS_DEF_QOS_CAPAB 0xf
+#define MWIFIEX_PRIO_BK 2
+#define MWIFIEX_PRIO_VI 5
struct mwifiex_uap_bss_param {
u8 channel;
@@ -233,7 +236,10 @@ struct mwifiex_ds_encrypt_key {
u8 mac_addr[ETH_ALEN];
u32 is_wapi_key;
u8 pn[PN_LEN]; /* packet number */
+ u8 pn_len;
u8 is_igtk_key;
+ u8 is_current_wep_key;
+ u8 is_rx_seq_valid;
};
struct mwifiex_power_cfg {
@@ -432,4 +438,16 @@ struct mwifiex_ds_coalesce_cfg {
struct mwifiex_coalesce_rule rule[MWIFIEX_COALESCE_MAX_RULES];
};
+struct mwifiex_ds_tdls_oper {
+ u16 tdls_action;
+ u8 peer_mac[ETH_ALEN];
+ u16 capability;
+ u8 qos_info;
+ u8 *ext_capab;
+ u8 ext_capab_len;
+ u8 *supp_rates;
+ u8 supp_rates_len;
+ u8 *ht_capab;
+};
+
#endif /* !_MWIFIEX_IOCTL_H_ */
diff --git a/drivers/net/wireless/mwifiex/join.c b/drivers/net/wireless/mwifiex/join.c
index 4e4686e..34472ea 100644
--- a/drivers/net/wireless/mwifiex/join.c
+++ b/drivers/net/wireless/mwifiex/join.c
@@ -515,8 +515,7 @@ int mwifiex_cmd_802_11_associate(struct mwifiex_private *priv,
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_cmd_append_11ac_tlv(priv, bss_desc, &pos);
/* Append vendor specific IE TLV */
@@ -983,7 +982,7 @@ mwifiex_cmd_802_11_ad_hoc_start(struct mwifiex_private *priv,
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
radio_type = mwifiex_band_to_radio_type(
priv->adapter->config_bands);
- mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+ mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
if (adapter->sec_chan_offset ==
IEEE80211_HT_PARAM_CHA_SEC_NONE) {
@@ -1300,8 +1299,7 @@ int mwifiex_associate(struct mwifiex_private *priv,
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
@@ -1335,8 +1333,7 @@ mwifiex_adhoc_start(struct mwifiex_private *priv,
priv->curr_bss_params.band);
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
@@ -1376,8 +1373,7 @@ int mwifiex_adhoc_join(struct mwifiex_private *priv,
if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info) &&
!bss_desc->disable_11n && !bss_desc->disable_11ac &&
- (priv->adapter->config_bands & BAND_GAC ||
- priv->adapter->config_bands & BAND_AAC))
+ priv->adapter->config_bands & BAND_AAC)
mwifiex_set_11ac_ba_params(priv);
else
mwifiex_set_ba_params(priv);
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index d8ad554..407f8ea 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -59,7 +59,7 @@ enum {
#define MWIFIEX_UPLD_SIZE (2312)
-#define MAX_EVENT_SIZE 1024
+#define MAX_EVENT_SIZE 2048
#define ARP_FILTER_MAX_BUF_SIZE 68
@@ -210,6 +210,7 @@ struct mwifiex_ra_list_tbl {
u16 ba_pkt_count;
u8 ba_packet_thr;
u16 total_pkt_count;
+ bool tdls_link;
};
struct mwifiex_tid_tbl {
@@ -262,6 +263,31 @@ struct ieee_types_generic {
u8 data[IEEE_MAX_IE_SIZE - sizeof(struct ieee_types_header)];
} __packed;
+struct ieee_types_bss_co_2040 {
+ struct ieee_types_header ieee_hdr;
+ u8 bss_2040co;
+} __packed;
+
+struct ieee_types_extcap {
+ struct ieee_types_header ieee_hdr;
+ u8 ext_capab[8];
+} __packed;
+
+struct ieee_types_vht_cap {
+ struct ieee_types_header ieee_hdr;
+ struct ieee80211_vht_cap vhtcap;
+} __packed;
+
+struct ieee_types_vht_oper {
+ struct ieee_types_header ieee_hdr;
+ struct ieee80211_vht_operation vhtoper;
+} __packed;
+
+struct ieee_types_aid {
+ struct ieee_types_header ieee_hdr;
+ u16 aid;
+} __packed;
+
struct mwifiex_bssdescriptor {
u8 mac_address[ETH_ALEN];
struct cfg80211_ssid ssid;
@@ -443,6 +469,7 @@ struct mwifiex_private {
u8 wpa_ie_len;
u8 wpa_is_gtk_set;
struct host_cmd_ds_802_11_key_material aes_key;
+ struct host_cmd_ds_802_11_key_material_v2 aes_key_v2;
u8 wapi_ie[256];
u8 wapi_ie_len;
u8 *wps_ie;
@@ -461,6 +488,7 @@ struct mwifiex_private {
struct mwifiex_tx_aggr aggr_prio_tbl[MAX_NUM_TID];
struct mwifiex_add_ba_param add_ba_param;
u16 rx_seq[MAX_NUM_TID];
+ u8 tos_to_tid_inv[MAX_NUM_TID];
struct list_head rx_reorder_tbl_ptr;
/* spin lock for rx_reorder_tbl_ptr queue */
spinlock_t rx_reorder_tbl_lock;
@@ -518,6 +546,8 @@ struct mwifiex_private {
unsigned long csa_expire_time;
u8 del_list_idx;
bool hs2_enabled;
+ struct station_parameters *sta_params;
+ struct sk_buff_head tdls_txq;
};
enum mwifiex_ba_status {
@@ -583,17 +613,35 @@ struct mwifiex_bss_priv {
u64 fw_tsf;
};
-/* This is AP specific structure which stores information
- * about associated STA
+struct mwifiex_tdls_capab {
+ __le16 capab;
+ u8 rates[32];
+ u8 rates_len;
+ u8 qos_info;
+ u8 coex_2040;
+ u16 aid;
+ struct ieee80211_ht_cap ht_capb;
+ struct ieee80211_ht_operation ht_oper;
+ struct ieee_types_extcap extcap;
+ struct ieee_types_generic rsn_ie;
+ struct ieee80211_vht_cap vhtcap;
+ struct ieee80211_vht_operation vhtoper;
+};
+
+/* This is AP/TDLS specific structure which stores information
+ * about associated/peer STA
*/
struct mwifiex_sta_node {
struct list_head list;
u8 mac_addr[ETH_ALEN];
u8 is_wmm_enabled;
u8 is_11n_enabled;
+ u8 is_11ac_enabled;
u8 ampdu_sta[MAX_NUM_TID];
u16 rx_seq[MAX_NUM_TID];
u16 max_amsdu;
+ u8 tdls_status;
+ struct mwifiex_tdls_capab tdls_cap;
};
struct mwifiex_if_ops {
@@ -753,6 +801,8 @@ struct mwifiex_adapter {
atomic_t is_tx_received;
atomic_t pending_bridged_pkts;
struct semaphore *card_sem;
+ bool ext_scan;
+ u8 fw_key_api_major_ver, fw_key_api_minor_ver;
};
int mwifiex_init_lock_list(struct mwifiex_adapter *adapter);
@@ -938,6 +988,12 @@ mwifiex_set_wmm_params(struct mwifiex_private *priv,
struct cfg80211_ap_settings *params);
void mwifiex_set_ba_params(struct mwifiex_private *priv);
void mwifiex_set_11ac_ba_params(struct mwifiex_private *priv);
+int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf);
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv);
+int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
+ void *buf);
/*
* This function checks if the queuing is RA based or not.
@@ -1078,7 +1134,7 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
const u8 *key, int key_len, u8 key_index,
const u8 *mac_addr, int disable);
-int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len);
+int mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len);
int mwifiex_get_ver_ext(struct mwifiex_private *priv);
@@ -1159,6 +1215,32 @@ void mwifiex_dnld_txpwr_table(struct mwifiex_private *priv);
extern const struct ethtool_ops mwifiex_ethtool_ops;
+void mwifiex_del_all_sta_list(struct mwifiex_private *priv);
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac);
+void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+ int ies_len, struct mwifiex_sta_node *node);
+struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac);
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac);
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, u8 *peer,
+ u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len);
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len);
+void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *buf, int len);
+int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action);
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac);
+void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv);
+bool mwifiex_is_bss_in_11ac_mode(struct mwifiex_private *priv);
+u8 mwifiex_get_center_freq_index(struct mwifiex_private *priv, u8 band,
+ u32 pri_chan, u8 chan_bw);
+
#ifdef CONFIG_DEBUG_FS
void mwifiex_debugfs_init(void);
void mwifiex_debugfs_remove(void);
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 7fe7b53..31d8a9d 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -39,20 +39,31 @@ static struct semaphore add_remove_card_sem;
static int
mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
- int size, int flags)
+ size_t size, int flags)
{
struct pcie_service_card *card = adapter->card;
- dma_addr_t buf_pa;
+ struct mwifiex_dma_mapping mapping;
- buf_pa = pci_map_single(card->dev, skb->data, size, flags);
- if (pci_dma_mapping_error(card->dev, buf_pa)) {
+ mapping.addr = pci_map_single(card->dev, skb->data, size, flags);
+ if (pci_dma_mapping_error(card->dev, mapping.addr)) {
dev_err(adapter->dev, "failed to map pci memory!\n");
return -1;
}
- memcpy(skb->cb, &buf_pa, sizeof(dma_addr_t));
+ mapping.len = size;
+ memcpy(skb->cb, &mapping, sizeof(mapping));
return 0;
}
+static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
+ struct sk_buff *skb, int flags)
+{
+ struct pcie_service_card *card = adapter->card;
+ struct mwifiex_dma_mapping mapping;
+
+ MWIFIEX_SKB_PACB(skb, &mapping);
+ pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
+}
+
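The helper above, and the MWIFIEX_SKB_DMA_ADDR() calls in the hunks that follow, rely on DMA-mapping accessors added elsewhere in this series (presumably in util.h). A sketch of what they are assumed to look like, not the actual definitions:

struct mwifiex_dma_mapping {
	dma_addr_t addr;
	size_t len;
};

/* Read back the mapping that mwifiex_map_pci_memory() stored in skb->cb. */
static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb,
				    struct mwifiex_dma_mapping *mapping)
{
	memcpy(mapping, skb->cb, sizeof(*mapping));
}

static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
{
	struct mwifiex_dma_mapping mapping;

	MWIFIEX_SKB_PACB(skb, &mapping);
	return mapping.addr;
}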
/*
* This function reads sleep cookie and checks if FW is ready
*/
@@ -456,7 +467,7 @@ static int mwifiex_init_rxq_ring(struct mwifiex_adapter *adapter)
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
dev_dbg(adapter->dev,
"info: RX ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
@@ -513,7 +524,7 @@ static int mwifiex_pcie_init_evt_ring(struct mwifiex_adapter *adapter)
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
dev_dbg(adapter->dev,
"info: EVT ring: skb=%p len=%d data=%p buf_pa=%#x:%x\n",
@@ -549,8 +560,8 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
desc2 = card->txbd_ring[i];
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
- pci_unmap_single(card->dev, desc2->paddr,
- skb->len, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -558,8 +569,8 @@ static void mwifiex_cleanup_txq_ring(struct mwifiex_adapter *adapter)
desc = card->txbd_ring[i];
if (card->tx_buf_list[i]) {
skb = card->tx_buf_list[i];
- pci_unmap_single(card->dev, desc->paddr,
- skb->len, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -587,8 +598,8 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
desc2 = card->rxbd_ring[i];
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
- pci_unmap_single(card->dev, desc2->paddr,
- skb->len, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
}
memset(desc2, 0, sizeof(*desc2));
@@ -596,8 +607,8 @@ static void mwifiex_cleanup_rxq_ring(struct mwifiex_adapter *adapter)
desc = card->rxbd_ring[i];
if (card->rx_buf_list[i]) {
skb = card->rx_buf_list[i];
- pci_unmap_single(card->dev, desc->paddr,
- skb->len, PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
}
memset(desc, 0, sizeof(*desc));
@@ -622,8 +633,8 @@ static void mwifiex_cleanup_evt_ring(struct mwifiex_adapter *adapter)
desc = card->evtbd_ring[i];
if (card->evt_buf_list[i]) {
skb = card->evt_buf_list[i];
- pci_unmap_single(card->dev, desc->paddr, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(skb);
}
card->evt_buf_list[i] = NULL;
@@ -861,7 +872,6 @@ static int mwifiex_pcie_alloc_cmdrsp_buf(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
{
struct pcie_service_card *card;
- dma_addr_t buf_pa;
if (!adapter)
return 0;
@@ -869,16 +879,14 @@ static int mwifiex_pcie_delete_cmdrsp_buf(struct mwifiex_adapter *adapter)
card = adapter->card;
if (card && card->cmdrsp_buf) {
- MWIFIEX_SKB_PACB(card->cmdrsp_buf, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, card->cmdrsp_buf,
+ PCI_DMA_FROMDEVICE);
dev_kfree_skb_any(card->cmdrsp_buf);
}
if (card && card->cmd_buf) {
- MWIFIEX_SKB_PACB(card->cmd_buf, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, card->cmd_buf->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
+ PCI_DMA_TODEVICE);
}
return 0;
}
@@ -956,7 +964,6 @@ static int mwifiex_clean_pcie_ring_buf(struct mwifiex_adapter *adapter)
static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
{
struct sk_buff *skb;
- dma_addr_t buf_pa;
u32 wrdoneidx, rdptr, num_tx_buffs, unmap_count = 0;
struct mwifiex_pcie_buf_desc *desc;
struct mwifiex_pfu_buf_desc *desc2;
@@ -986,13 +993,13 @@ static int mwifiex_pcie_send_data_complete(struct mwifiex_adapter *adapter)
reg->tx_start_ptr;
skb = card->tx_buf_list[wrdoneidx];
+
if (skb) {
dev_dbg(adapter->dev,
"SEND COMP: Detach skb %p at txbd_rdidx=%d\n",
skb, wrdoneidx);
- MWIFIEX_SKB_PACB(skb, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, skb->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
unmap_count++;
@@ -1082,12 +1089,12 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
tmp = (__le16 *)&payload[2];
*tmp = cpu_to_le16(MWIFIEX_TYPE_DATA);
- if (mwifiex_map_pci_memory(adapter, skb, skb->len ,
+ if (mwifiex_map_pci_memory(adapter, skb, skb->len,
PCI_DMA_TODEVICE))
return -1;
wrindx = (card->txbd_wrptr & reg->tx_mask) >> reg->tx_start_ptr;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
card->tx_buf_list[wrindx] = skb;
if (reg->pfu_enabled) {
@@ -1162,8 +1169,7 @@ mwifiex_pcie_send_data(struct mwifiex_adapter *adapter, struct sk_buff *skb,
return -EINPROGRESS;
done_unmap:
- MWIFIEX_SKB_PACB(skb, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, skb->len, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
card->tx_buf_list[wrindx] = NULL;
if (reg->pfu_enabled)
memset(desc2, 0, sizeof(*desc2));
@@ -1217,9 +1223,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
if (!skb_data)
return -ENOMEM;
- MWIFIEX_SKB_PACB(skb_data, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_data, PCI_DMA_FROMDEVICE);
card->rx_buf_list[rd_index] = NULL;
/* Get data length from interface header -
@@ -1246,7 +1250,7 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb_tmp);
dev_dbg(adapter->dev,
"RECV DATA: Attach new sk_buff %p at rxbd_rdidx=%d\n",
@@ -1322,7 +1326,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
if (mwifiex_map_pci_memory(adapter, skb, skb->len , PCI_DMA_TODEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
+ buf_pa = MWIFIEX_SKB_DMA_ADDR(skb);
/* Write the lower 32bits of the physical address to low command
* address scratch register
@@ -1331,8 +1335,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
dev_err(adapter->dev,
"%s: failed to write download command to boot code.\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1344,8 +1347,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
dev_err(adapter->dev,
"%s: failed to write download command to boot code.\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1354,8 +1356,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
dev_err(adapter->dev,
"%s: failed to write command len to cmd_size scratch reg\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1364,8 +1365,7 @@ mwifiex_pcie_send_boot_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
CPU_INTR_DOOR_BELL)) {
dev_err(adapter->dev,
"%s: failed to assert door-bell intr\n", __func__);
- pci_unmap_single(card->dev, buf_pa,
- MWIFIEX_UPLD_SIZE, PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
return -1;
}
@@ -1439,7 +1439,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
*/
if (card->cmdrsp_buf) {
- MWIFIEX_SKB_PACB(card->cmdrsp_buf, &cmdrsp_buf_pa);
+ cmdrsp_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmdrsp_buf);
/* Write the lower 32bits of the cmdrsp buffer physical
address */
if (mwifiex_write_reg(adapter, reg->cmdrsp_addr_lo,
@@ -1460,7 +1460,7 @@ mwifiex_pcie_send_cmd(struct mwifiex_adapter *adapter, struct sk_buff *skb)
}
}
- MWIFIEX_SKB_PACB(card->cmd_buf, &cmd_buf_pa);
+ cmd_buf_pa = MWIFIEX_SKB_DMA_ADDR(card->cmd_buf);
/* Write the lower 32bits of the physical address to reg->cmd_addr_lo */
if (mwifiex_write_reg(adapter, reg->cmd_addr_lo,
(u32)cmd_buf_pa)) {
@@ -1514,13 +1514,17 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
int count = 0;
u16 rx_len;
__le16 pkt_len;
- dma_addr_t buf_pa;
dev_dbg(adapter->dev, "info: Rx CMD Response\n");
- MWIFIEX_SKB_PACB(skb, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MWIFIEX_UPLD_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_FROMDEVICE);
+
+ /* Unmap the command as a response has been received. */
+ if (card->cmd_buf) {
+ mwifiex_unmap_pci_memory(adapter, card->cmd_buf,
+ PCI_DMA_TODEVICE);
+ card->cmd_buf = NULL;
+ }
pkt_len = *((__le16 *)skb->data);
rx_len = le16_to_cpu(pkt_len);
@@ -1552,8 +1556,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
PCI_DMA_FROMDEVICE))
return -1;
-
- MWIFIEX_SKB_PACB(skb, &buf_pa);
} else if (mwifiex_pcie_ok_to_access_hw(adapter)) {
adapter->curr_cmd->resp_skb = skb;
adapter->cmd_resp_received = true;
@@ -1588,8 +1590,6 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
struct sk_buff *skb)
{
struct pcie_service_card *card = adapter->card;
- dma_addr_t buf_pa;
- struct sk_buff *skb_tmp;
if (skb) {
card->cmdrsp_buf = skb;
@@ -1599,14 +1599,6 @@ static int mwifiex_pcie_cmdrsp_complete(struct mwifiex_adapter *adapter,
return -1;
}
- skb_tmp = card->cmd_buf;
- if (skb_tmp) {
- MWIFIEX_SKB_PACB(skb_tmp, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, skb_tmp->len,
- PCI_DMA_FROMDEVICE);
- card->cmd_buf = NULL;
- }
-
return 0;
}
@@ -1619,7 +1611,6 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
u32 wrptr, event;
- dma_addr_t buf_pa;
struct mwifiex_evt_buf_desc *desc;
if (!mwifiex_pcie_ok_to_access_hw(adapter))
@@ -1655,9 +1646,7 @@ static int mwifiex_pcie_process_event_ready(struct mwifiex_adapter *adapter)
dev_dbg(adapter->dev, "info: Read Index: %d\n", rdptr);
skb_cmd = card->evt_buf_list[rdptr];
- MWIFIEX_SKB_PACB(skb_cmd, &buf_pa);
- pci_unmap_single(card->dev, buf_pa, MAX_EVENT_SIZE,
- PCI_DMA_FROMDEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb_cmd, PCI_DMA_FROMDEVICE);
/* Take the pointer and set it to event pointer in adapter
and will return back after event handling callback */
@@ -1703,7 +1692,6 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
int ret = 0;
u32 rdptr = card->evtbd_rdptr & MWIFIEX_EVTBD_MASK;
u32 wrptr;
- dma_addr_t buf_pa;
struct mwifiex_evt_buf_desc *desc;
if (!skb)
@@ -1728,11 +1716,9 @@ static int mwifiex_pcie_event_complete(struct mwifiex_adapter *adapter,
MAX_EVENT_SIZE,
PCI_DMA_FROMDEVICE))
return -1;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
card->evt_buf_list[rdptr] = skb;
- MWIFIEX_SKB_PACB(skb, &buf_pa);
desc = card->evtbd_ring[rdptr];
- desc->paddr = buf_pa;
+ desc->paddr = MWIFIEX_SKB_DMA_ADDR(skb);
desc->len = (u16)skb->len;
desc->flags = 0;
skb = NULL;
@@ -1782,7 +1768,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
struct sk_buff *skb;
u32 txlen, tx_blocks = 0, tries, len;
u32 block_retry_cnt = 0;
- dma_addr_t buf_pa;
struct pcie_service_card *card = adapter->card;
const struct mwifiex_pcie_card_reg *reg = card->pcie.reg;
@@ -1880,8 +1865,6 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
goto done;
}
- MWIFIEX_SKB_PACB(skb, &buf_pa);
-
/* Wait for the command done interrupt */
do {
if (mwifiex_read_reg(adapter, PCIE_CPU_INT_STATUS,
@@ -1889,16 +1872,15 @@ static int mwifiex_prog_fw_w_helper(struct mwifiex_adapter *adapter,
dev_err(adapter->dev, "%s: Failed to read "
"interrupt status during fw dnld.\n",
__func__);
- pci_unmap_single(card->dev, buf_pa, skb->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb,
+ PCI_DMA_TODEVICE);
ret = -1;
goto done;
}
} while ((ireg_intr & CPU_INTR_DOOR_BELL) ==
CPU_INTR_DOOR_BELL);
- pci_unmap_single(card->dev, buf_pa, skb->len,
- PCI_DMA_TODEVICE);
+ mwifiex_unmap_pci_memory(adapter, skb, PCI_DMA_TODEVICE);
offset += txlen;
} while (true);
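
The pcie.c hunks above fold every MWIFIEX_SKB_PACB()/pci_unmap_single() pair into a single mwifiex_unmap_pci_memory() call, but the helper itself is defined outside this excerpt. A minimal sketch of what such a wrapper could look like, assuming mwifiex_map_pci_memory() caches the DMA address and length in the skb control buffer (the mwifiex_dma_mapping layout here is illustrative, not the driver's actual definition):

static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
                                     struct sk_buff *skb, int flags)
{
        struct pcie_service_card *card = adapter->card;
        struct mwifiex_dma_mapping mapping; /* assumed: { dma_addr_t addr; size_t len; } */

        /* Fetch the mapping cached at map time and undo it in one place. */
        MWIFIEX_SKB_PACB(skb, &mapping);
        pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
}
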
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 0a8a26e..92adbb1 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -595,7 +595,7 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
struct mwifiex_chan_scan_param_set *tmp_chan_list;
struct mwifiex_chan_scan_param_set *start_chan;
- u32 tlv_idx, rates_size;
+ u32 tlv_idx, rates_size, cmd_no;
u32 total_scan_time;
u32 done_early;
u8 radio_type;
@@ -733,9 +733,13 @@ mwifiex_scan_channel_list(struct mwifiex_private *priv,
/* Send the scan command to the firmware with the specified
cfg */
- ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN,
- HostCmd_ACT_GEN_SET, 0,
- scan_cfg_out);
+ if (priv->adapter->ext_scan)
+ cmd_no = HostCmd_CMD_802_11_SCAN_EXT;
+ else
+ cmd_no = HostCmd_CMD_802_11_SCAN;
+
+ ret = mwifiex_send_cmd_async(priv, cmd_no, HostCmd_ACT_GEN_SET,
+ 0, scan_cfg_out);
/* rate IE is updated per scan command but same starting
* pointer is used each time so that rate IE from earlier
@@ -786,6 +790,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
struct mwifiex_adapter *adapter = priv->adapter;
struct mwifiex_ie_types_num_probes *num_probes_tlv;
struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
+ struct mwifiex_ie_types_bssid_list *bssid_tlv;
u8 *tlv_pos;
u32 num_probes;
u32 ssid_len;
@@ -848,6 +853,17 @@ mwifiex_config_scan(struct mwifiex_private *priv,
user_scan_in->specific_bssid,
sizeof(scan_cfg_out->specific_bssid));
+ if (adapter->ext_scan &&
+ !is_zero_ether_addr(scan_cfg_out->specific_bssid)) {
+ bssid_tlv =
+ (struct mwifiex_ie_types_bssid_list *)tlv_pos;
+ bssid_tlv->header.type = cpu_to_le16(TLV_TYPE_BSSID);
+ bssid_tlv->header.len = cpu_to_le16(ETH_ALEN);
+ memcpy(bssid_tlv->bssid, user_scan_in->specific_bssid,
+ ETH_ALEN);
+ tlv_pos += sizeof(struct mwifiex_ie_types_bssid_list);
+ }
+
for (i = 0; i < user_scan_in->num_ssids; i++) {
ssid_len = user_scan_in->ssid_list[i].ssid_len;
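
The hunk above appends a BSSID TLV when an extended scan targets a specific BSSID. A self-contained sketch of building such a little-endian type/length/value record into a flat buffer; the numeric TLV_TYPE_BSSID value is hypothetical, chosen only for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN       6
#define TLV_TYPE_BSSID 0x0123   /* hypothetical value, for illustration only */

static void put_le16(uint8_t *p, uint16_t v)
{
        p[0] = v & 0xff;
        p[1] = v >> 8;
}

/* Append one { le16 type, le16 len, bssid[6] } TLV and return the new write pointer. */
static uint8_t *append_bssid_tlv(uint8_t *tlv_pos, const uint8_t *bssid)
{
        put_le16(tlv_pos, TLV_TYPE_BSSID);
        put_le16(tlv_pos + 2, ETH_ALEN);
        memcpy(tlv_pos + 4, bssid, ETH_ALEN);
        return tlv_pos + 4 + ETH_ALEN;
}

int main(void)
{
        uint8_t buf[32];
        const uint8_t bssid[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        printf("BSSID TLV occupies %td bytes\n", append_bssid_tlv(buf, bssid) - buf);
        return 0;
}
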
@@ -941,7 +957,7 @@ mwifiex_config_scan(struct mwifiex_private *priv,
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
radio_type =
mwifiex_band_to_radio_type(priv->adapter->config_bands);
- mwifiex_fill_cap_info(priv, radio_type, ht_cap);
+ mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap);
tlv_pos += sizeof(struct mwifiex_ie_types_htcap);
}
@@ -1576,6 +1592,228 @@ done:
return 0;
}
+static int
+mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info,
+ u32 *bytes_left, u64 fw_tsf, u8 *radio_type,
+ bool ext_scan, s32 rssi_val)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct mwifiex_chan_freq_power *cfp;
+ struct cfg80211_bss *bss;
+ u8 bssid[ETH_ALEN];
+ s32 rssi;
+ const u8 *ie_buf;
+ size_t ie_len;
+ u16 channel = 0;
+ u16 beacon_size = 0;
+ u32 curr_bcn_bytes;
+ u32 freq;
+ u16 beacon_period;
+ u16 cap_info_bitmap;
+ u8 *current_ptr;
+ u64 timestamp;
+ struct mwifiex_fixed_bcn_param *bcn_param;
+ struct mwifiex_bss_priv *bss_priv;
+
+ if (*bytes_left >= sizeof(beacon_size)) {
+ /* Extract & convert beacon size from command buffer */
+ memcpy(&beacon_size, *bss_info, sizeof(beacon_size));
+ *bytes_left -= sizeof(beacon_size);
+ *bss_info += sizeof(beacon_size);
+ }
+
+ if (!beacon_size || beacon_size > *bytes_left) {
+ *bss_info += *bytes_left;
+ *bytes_left = 0;
+ return -EFAULT;
+ }
+
+ /* Initialize the current working beacon pointer for this BSS
+ * iteration
+ */
+ current_ptr = *bss_info;
+
+ /* Advance the return beacon pointer past the current beacon */
+ *bss_info += beacon_size;
+ *bytes_left -= beacon_size;
+
+ curr_bcn_bytes = beacon_size;
+
+ /* First 5 fields are bssid, RSSI (for legacy scan only),
+ * time stamp, beacon interval, and capability information
+ */
+ if (curr_bcn_bytes < ETH_ALEN + sizeof(u8) +
+ sizeof(struct mwifiex_fixed_bcn_param)) {
+ dev_err(adapter->dev, "InterpretIE: not enough bytes left\n");
+ return -EFAULT;
+ }
+
+ memcpy(bssid, current_ptr, ETH_ALEN);
+ current_ptr += ETH_ALEN;
+ curr_bcn_bytes -= ETH_ALEN;
+
+ if (!ext_scan) {
+ rssi = (s32) *(u8 *)current_ptr;
+ rssi = (-rssi) * 100; /* Convert dBm to mBm */
+ current_ptr += sizeof(u8);
+ curr_bcn_bytes -= sizeof(u8);
+ dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+ } else {
+ rssi = rssi_val;
+ }
+
+ bcn_param = (struct mwifiex_fixed_bcn_param *)current_ptr;
+ current_ptr += sizeof(*bcn_param);
+ curr_bcn_bytes -= sizeof(*bcn_param);
+
+ timestamp = le64_to_cpu(bcn_param->timestamp);
+ beacon_period = le16_to_cpu(bcn_param->beacon_period);
+
+ cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
+ dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
+ cap_info_bitmap);
+
+ /* Rest of the current buffer are IE's */
+ ie_buf = current_ptr;
+ ie_len = curr_bcn_bytes;
+ dev_dbg(adapter->dev, "info: InterpretIE: IELength for this AP = %d\n",
+ curr_bcn_bytes);
+
+ while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
+ u8 element_id, element_len;
+
+ element_id = *current_ptr;
+ element_len = *(current_ptr + 1);
+ if (curr_bcn_bytes < element_len +
+ sizeof(struct ieee_types_header)) {
+ dev_err(adapter->dev,
+ "%s: bytes left < IE length\n", __func__);
+ return -EFAULT;
+ }
+ if (element_id == WLAN_EID_DS_PARAMS) {
+ channel = *(current_ptr +
+ sizeof(struct ieee_types_header));
+ break;
+ }
+
+ current_ptr += element_len + sizeof(struct ieee_types_header);
+ curr_bcn_bytes -= element_len +
+ sizeof(struct ieee_types_header);
+ }
+
+ if (channel) {
+ struct ieee80211_channel *chan;
+ u8 band;
+
+ /* Skip entry if on csa closed channel */
+ if (channel == priv->csa_chan) {
+ dev_dbg(adapter->dev,
+ "Dropping entry on csa closed channel\n");
+ return 0;
+ }
+
+ band = BAND_G;
+ if (radio_type)
+ band = mwifiex_radio_type_to_band(*radio_type &
+ (BIT(0) | BIT(1)));
+
+ cfp = mwifiex_get_cfp(priv, band, channel, 0);
+
+ freq = cfp ? cfp->freq : 0;
+
+ chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
+
+ if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
+ bss = cfg80211_inform_bss(priv->wdev->wiphy,
+ chan, bssid, timestamp,
+ cap_info_bitmap, beacon_period,
+ ie_buf, ie_len, rssi, GFP_KERNEL);
+ bss_priv = (struct mwifiex_bss_priv *)bss->priv;
+ bss_priv->band = band;
+ bss_priv->fw_tsf = fw_tsf;
+ if (priv->media_connected &&
+ !memcmp(bssid, priv->curr_bss_params.bss_descriptor
+ .mac_address, ETH_ALEN))
+ mwifiex_update_curr_bss_params(priv, bss);
+ cfg80211_put_bss(priv->wdev->wiphy, bss);
+ }
+ } else {
+ dev_dbg(adapter->dev, "missing BSS channel IE\n");
+ }
+
+ return 0;
+}
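
The heart of mwifiex_parse_single_response_buf() is the walk over the beacon's information elements to find the DS Parameter Set and recover the channel. A standalone restatement of that walk using only standard C; the sample IE buffer in main() is made up:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define WLAN_EID_DS_PARAMS 3   /* DS Parameter Set element ID (IEEE 802.11) */

/* Walk { id, len, value[len] } elements; return the channel or -1 if absent. */
static int find_ds_channel(const uint8_t *ie, size_t len)
{
        while (len >= 2) {
                uint8_t id = ie[0], elen = ie[1];

                if (len < (size_t)elen + 2)
                        return -1;              /* truncated element */
                if (id == WLAN_EID_DS_PARAMS && elen >= 1)
                        return ie[2];           /* channel number */
                ie += elen + 2;
                len -= elen + 2;
        }
        return -1;
}

int main(void)
{
        /* SSID "x" followed by a DS Parameter Set announcing channel 6 */
        const uint8_t ies[] = { 0x00, 0x01, 'x', 0x03, 0x01, 0x06 };

        printf("channel = %d\n", find_ds_channel(ies, sizeof(ies)));
        return 0;
}
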
+
+static void mwifiex_check_next_scan_command(struct mwifiex_private *priv)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ struct cmd_ctrl_node *cmd_node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
+ if (list_empty(&adapter->scan_pending_q)) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
+ spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
+ adapter->scan_processing = false;
+ spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+
+ /* Need to indicate IOCTL complete */
+ if (adapter->curr_cmd->wait_q_enabled) {
+ adapter->cmd_wait_q.status = 0;
+ if (!priv->scan_request) {
+ dev_dbg(adapter->dev,
+ "complete internal scan\n");
+ mwifiex_complete_cmd(adapter,
+ adapter->curr_cmd);
+ }
+ }
+ if (priv->report_scan_result)
+ priv->report_scan_result = false;
+
+ if (priv->scan_request) {
+ dev_dbg(adapter->dev, "info: notifying scan done\n");
+ cfg80211_scan_done(priv->scan_request, 0);
+ priv->scan_request = NULL;
+ } else {
+ priv->scan_aborting = false;
+ dev_dbg(adapter->dev, "info: scan already aborted\n");
+ }
+ } else {
+ if ((priv->scan_aborting && !priv->scan_request) ||
+ priv->scan_block) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
+ mod_timer(&priv->scan_delay_timer, jiffies);
+ dev_dbg(priv->adapter->dev,
+ "info: %s: triggerring scan abort\n", __func__);
+ } else if (!mwifiex_wmm_lists_empty(adapter) &&
+ (priv->scan_request && (priv->scan_request->flags &
+ NL80211_SCAN_FLAG_LOW_PRIORITY))) {
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ adapter->scan_delay_cnt = 1;
+ mod_timer(&priv->scan_delay_timer, jiffies +
+ msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
+ dev_dbg(priv->adapter->dev,
+ "info: %s: deferring scan\n", __func__);
+ } else {
+ /* Get scan command from scan_pending_q and put to
+ * cmd_pending_q
+ */
+ cmd_node = list_first_entry(&adapter->scan_pending_q,
+ struct cmd_ctrl_node, list);
+ list_del(&cmd_node->list);
+ spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
+ flags);
+ mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
+ true);
+ }
+ }
+
+ return;
+}
+
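
mwifiex_check_next_scan_command() picks between finishing, aborting, deferring, or dispatching the next queued scan based on a handful of flags. A compact pure-function restatement of that decision, with invented enum names, shown only to make the branch order explicit:

#include <stdbool.h>
#include <stdio.h>

enum next_scan_action { SCAN_DONE, SCAN_ABORT, SCAN_DEFER, SCAN_DISPATCH_NEXT };

static enum next_scan_action pick_action(bool queue_empty, bool scan_aborting,
                                         bool have_scan_request, bool scan_block,
                                         bool wmm_busy, bool low_priority_scan)
{
        if (queue_empty)
                return SCAN_DONE;               /* report completion to cfg80211/IOCTL */
        if ((scan_aborting && !have_scan_request) || scan_block)
                return SCAN_ABORT;              /* arm the delay timer immediately */
        if (wmm_busy && have_scan_request && low_priority_scan)
                return SCAN_DEFER;              /* retry after the scan delay interval */
        return SCAN_DISPATCH_NEXT;              /* move next scan cmd to the pending queue */
}

int main(void)
{
        printf("busy + low-priority scan -> action %d (defer)\n",
               pick_action(false, false, true, false, true, true));
        return 0;
}
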
/*
* This function handles the command response of scan.
*
@@ -1600,7 +1838,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
{
int ret = 0;
struct mwifiex_adapter *adapter = priv->adapter;
- struct cmd_ctrl_node *cmd_node;
struct host_cmd_ds_802_11_scan_rsp *scan_rsp;
struct mwifiex_ie_types_data *tlv_data;
struct mwifiex_ie_types_tsf_timestamp *tsf_tlv;
@@ -1609,12 +1846,11 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
u32 bytes_left;
u32 idx;
u32 tlv_buf_size;
- struct mwifiex_chan_freq_power *cfp;
struct mwifiex_ie_types_chan_band_list_param_set *chan_band_tlv;
struct chan_band_param_set *chan_band;
u8 is_bgscan_resp;
- unsigned long flags;
- struct cfg80211_bss *bss;
+ __le64 fw_tsf = 0;
+ u8 *radio_type;
is_bgscan_resp = (le16_to_cpu(resp->command)
== HostCmd_CMD_802_11_BG_SCAN_QUERY);
@@ -1676,220 +1912,194 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
&chan_band_tlv);
for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
- u8 bssid[ETH_ALEN];
- s32 rssi;
- const u8 *ie_buf;
- size_t ie_len;
- u16 channel = 0;
- __le64 fw_tsf = 0;
- u16 beacon_size = 0;
- u32 curr_bcn_bytes;
- u32 freq;
- u16 beacon_period;
- u16 cap_info_bitmap;
- u8 *current_ptr;
- u64 timestamp;
- struct mwifiex_bcn_param *bcn_param;
- struct mwifiex_bss_priv *bss_priv;
-
- if (bytes_left >= sizeof(beacon_size)) {
- /* Extract & convert beacon size from command buffer */
- memcpy(&beacon_size, bss_info, sizeof(beacon_size));
- bytes_left -= sizeof(beacon_size);
- bss_info += sizeof(beacon_size);
- }
+ /*
+ * If the TSF TLV was appended to the scan results, save this
+ * entry's TSF value in the fw_tsf field. It is the firmware's
+ * TSF value at the time the beacon or probe response was
+ * received.
+ */
+ if (tsf_tlv)
+ memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
+ sizeof(fw_tsf));
- if (!beacon_size || beacon_size > bytes_left) {
- bss_info += bytes_left;
- bytes_left = 0;
- ret = -1;
- goto check_next_scan;
+ if (chan_band_tlv) {
+ chan_band = &chan_band_tlv->chan_band_param[idx];
+ radio_type = &chan_band->radio_type;
+ } else {
+ radio_type = NULL;
}
- /* Initialize the current working beacon pointer for this BSS
- * iteration */
- current_ptr = bss_info;
+ ret = mwifiex_parse_single_response_buf(priv, &bss_info,
+ &bytes_left,
+ le64_to_cpu(fw_tsf),
+ radio_type, false, 0);
+ if (ret)
+ goto check_next_scan;
+ }
- /* Advance the return beacon pointer past the current beacon */
- bss_info += beacon_size;
- bytes_left -= beacon_size;
+check_next_scan:
+ mwifiex_check_next_scan_command(priv);
+ return ret;
+}
- curr_bcn_bytes = beacon_size;
+/*
+ * This function prepares an extended scan command to be sent to the firmware
+ *
+ * This uses the scan command configuration sent to the command processing
+ * module in the command preparation stage to configure an extended scan
+ * command structure to send to the firmware.
+ */
+int mwifiex_cmd_802_11_scan_ext(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf)
+{
+ struct host_cmd_ds_802_11_scan_ext *ext_scan = &cmd->params.ext_scan;
+ struct mwifiex_scan_cmd_config *scan_cfg = data_buf;
- /*
- * First 5 fields are bssid, RSSI, time stamp, beacon interval,
- * and capability information
- */
- if (curr_bcn_bytes < sizeof(struct mwifiex_bcn_param)) {
- dev_err(adapter->dev,
- "InterpretIE: not enough bytes left\n");
- continue;
- }
- bcn_param = (struct mwifiex_bcn_param *)current_ptr;
- current_ptr += sizeof(*bcn_param);
- curr_bcn_bytes -= sizeof(*bcn_param);
+ memcpy(ext_scan->tlv_buffer, scan_cfg->tlv_buf, scan_cfg->tlv_buf_len);
- memcpy(bssid, bcn_param->bssid, ETH_ALEN);
+ cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SCAN_EXT);
- rssi = (s32) bcn_param->rssi;
- rssi = (-rssi) * 100; /* Convert dBm to mBm */
- dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%d\n", rssi);
+ /* Size is equal to the sizeof(fixed portions) + the TLV len + header */
+ cmd->size = cpu_to_le16((u16)(sizeof(ext_scan->reserved)
+ + scan_cfg->tlv_buf_len + S_DS_GEN));
- timestamp = le64_to_cpu(bcn_param->timestamp);
- beacon_period = le16_to_cpu(bcn_param->beacon_period);
+ return 0;
+}
- cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
- dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
- cap_info_bitmap);
+/* This function handles the command response of extended scan */
+int mwifiex_ret_802_11_scan_ext(struct mwifiex_private *priv)
+{
+ dev_dbg(priv->adapter->dev, "info: EXT scan returns successfully\n");
+ return 0;
+}
- /* Rest of the current buffer are IE's */
- ie_buf = current_ptr;
- ie_len = curr_bcn_bytes;
- dev_dbg(adapter->dev,
- "info: InterpretIE: IELength for this AP = %d\n",
- curr_bcn_bytes);
+/* This function handles the extended scan report event. It parses the
+ * extended scan results and informs the cfg80211 stack.
+ */
+int mwifiex_handle_event_ext_scan_report(struct mwifiex_private *priv,
+ void *buf)
+{
+ int ret = 0;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 *bss_info;
+ u32 bytes_left, bytes_left_for_tlv, idx;
+ u16 type, len;
+ struct mwifiex_ie_types_data *tlv;
+ struct mwifiex_ie_types_bss_scan_rsp *scan_rsp_tlv;
+ struct mwifiex_ie_types_bss_scan_info *scan_info_tlv;
+ u8 *radio_type;
+ u64 fw_tsf = 0;
+ s32 rssi = 0;
+ struct mwifiex_event_scan_result *event_scan = buf;
+ u8 num_of_set = event_scan->num_of_set;
+ u8 *scan_resp = buf + sizeof(struct mwifiex_event_scan_result);
+ u16 scan_resp_size = le16_to_cpu(event_scan->buf_size);
+
+ if (num_of_set > MWIFIEX_MAX_AP) {
+ dev_err(adapter->dev,
+ "EXT_SCAN: Invalid number of AP returned (%d)!!\n",
+ num_of_set);
+ ret = -1;
+ goto check_next_scan;
+ }
- while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
- u8 element_id, element_len;
+ bytes_left = scan_resp_size;
+ dev_dbg(adapter->dev,
+ "EXT_SCAN: size %d, returned %d APs...",
+ scan_resp_size, num_of_set);
- element_id = *current_ptr;
- element_len = *(current_ptr + 1);
- if (curr_bcn_bytes < element_len +
- sizeof(struct ieee_types_header)) {
- dev_err(priv->adapter->dev,
- "%s: bytes left < IE length\n",
- __func__);
- goto check_next_scan;
- }
- if (element_id == WLAN_EID_DS_PARAMS) {
- channel = *(current_ptr + sizeof(struct ieee_types_header));
- break;
- }
+ tlv = (struct mwifiex_ie_types_data *)scan_resp;
- current_ptr += element_len +
- sizeof(struct ieee_types_header);
- curr_bcn_bytes -= element_len +
- sizeof(struct ieee_types_header);
+ for (idx = 0; idx < num_of_set && bytes_left; idx++) {
+ type = le16_to_cpu(tlv->header.type);
+ len = le16_to_cpu(tlv->header.len);
+ if (bytes_left < sizeof(struct mwifiex_ie_types_header) + len) {
+ dev_err(adapter->dev, "EXT_SCAN: Error bytes left < TLV length\n");
+ break;
}
+ scan_rsp_tlv = NULL;
+ scan_info_tlv = NULL;
+ bytes_left_for_tlv = bytes_left;
- /*
- * If the TSF TLV was appended to the scan results, save this
- * entry's TSF value in the fw_tsf field. It is the firmware's
- * TSF value at the time the beacon or probe response was
- * received.
+ /* BSS response TLV with beacon or probe response buffer
+ * at the initial position of each descriptor
*/
- if (tsf_tlv)
- memcpy(&fw_tsf, &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
- sizeof(fw_tsf));
-
- if (channel) {
- struct ieee80211_channel *chan;
- u8 band;
+ if (type != TLV_TYPE_BSS_SCAN_RSP)
+ break;
- /* Skip entry if on csa closed channel */
- if (channel == priv->csa_chan) {
- dev_dbg(adapter->dev,
- "Dropping entry on csa closed channel\n");
+ bss_info = (u8 *)tlv;
+ scan_rsp_tlv = (struct mwifiex_ie_types_bss_scan_rsp *)tlv;
+ tlv = (struct mwifiex_ie_types_data *)(tlv->data + len);
+ bytes_left_for_tlv -=
+ (len + sizeof(struct mwifiex_ie_types_header));
+
+ while (bytes_left_for_tlv >=
+ sizeof(struct mwifiex_ie_types_header) &&
+ le16_to_cpu(tlv->header.type) != TLV_TYPE_BSS_SCAN_RSP) {
+ type = le16_to_cpu(tlv->header.type);
+ len = le16_to_cpu(tlv->header.len);
+ if (bytes_left_for_tlv <
+ sizeof(struct mwifiex_ie_types_header) + len) {
+ dev_err(adapter->dev,
+ "EXT_SCAN: Error in processing TLV, bytes left < TLV length\n");
+ scan_rsp_tlv = NULL;
+ bytes_left_for_tlv = 0;
continue;
}
-
- band = BAND_G;
- if (chan_band_tlv) {
- chan_band =
- &chan_band_tlv->chan_band_param[idx];
- band = mwifiex_radio_type_to_band(
- chan_band->radio_type
- & (BIT(0) | BIT(1)));
- }
-
- cfp = mwifiex_get_cfp(priv, band, channel, 0);
-
- freq = cfp ? cfp->freq : 0;
-
- chan = ieee80211_get_channel(priv->wdev->wiphy, freq);
-
- if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
- bss = cfg80211_inform_bss(priv->wdev->wiphy,
- chan, bssid, timestamp,
- cap_info_bitmap, beacon_period,
- ie_buf, ie_len, rssi, GFP_KERNEL);
- bss_priv = (struct mwifiex_bss_priv *)bss->priv;
- bss_priv->band = band;
- bss_priv->fw_tsf = le64_to_cpu(fw_tsf);
- if (priv->media_connected &&
- !memcmp(bssid,
- priv->curr_bss_params.bss_descriptor
- .mac_address, ETH_ALEN))
- mwifiex_update_curr_bss_params(priv,
- bss);
- cfg80211_put_bss(priv->wdev->wiphy, bss);
+ switch (type) {
+ case TLV_TYPE_BSS_SCAN_INFO:
+ scan_info_tlv =
+ (struct mwifiex_ie_types_bss_scan_info *)tlv;
+ if (len !=
+ sizeof(struct mwifiex_ie_types_bss_scan_info) -
+ sizeof(struct mwifiex_ie_types_header)) {
+ bytes_left_for_tlv = 0;
+ continue;
+ }
+ break;
+ default:
+ break;
}
- } else {
- dev_dbg(adapter->dev, "missing BSS channel IE\n");
+ tlv = (struct mwifiex_ie_types_data *)(tlv->data + len);
+ bytes_left -=
+ (len + sizeof(struct mwifiex_ie_types_header));
+ bytes_left_for_tlv -=
+ (len + sizeof(struct mwifiex_ie_types_header));
}
- }
-check_next_scan:
- spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
- if (list_empty(&adapter->scan_pending_q)) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
- spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
- adapter->scan_processing = false;
- spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);
+ if (!scan_rsp_tlv)
+ break;
- /* Need to indicate IOCTL complete */
- if (adapter->curr_cmd->wait_q_enabled) {
- adapter->cmd_wait_q.status = 0;
- if (!priv->scan_request) {
- dev_dbg(adapter->dev,
- "complete internal scan\n");
- mwifiex_complete_cmd(adapter,
- adapter->curr_cmd);
- }
- }
- if (priv->report_scan_result)
- priv->report_scan_result = false;
+ /* Advance pointer to the beacon buffer length and
+ * update the bytes count so that the function
+ * wlan_interpret_bss_desc_with_ie() can handle the
+ * scan buffer without any change.
+ */
+ bss_info += sizeof(u16);
+ bytes_left -= sizeof(u16);
- if (priv->scan_request) {
- dev_dbg(adapter->dev, "info: notifying scan done\n");
- cfg80211_scan_done(priv->scan_request, 0);
- priv->scan_request = NULL;
- } else {
- priv->scan_aborting = false;
- dev_dbg(adapter->dev, "info: scan already aborted\n");
- }
- } else {
- if ((priv->scan_aborting && !priv->scan_request) ||
- priv->scan_block) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- adapter->scan_delay_cnt = MWIFIEX_MAX_SCAN_DELAY_CNT;
- mod_timer(&priv->scan_delay_timer, jiffies);
- dev_dbg(priv->adapter->dev,
- "info: %s: triggerring scan abort\n", __func__);
- } else if (!mwifiex_wmm_lists_empty(adapter) &&
- (priv->scan_request && (priv->scan_request->flags &
- NL80211_SCAN_FLAG_LOW_PRIORITY))) {
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- adapter->scan_delay_cnt = 1;
- mod_timer(&priv->scan_delay_timer, jiffies +
- msecs_to_jiffies(MWIFIEX_SCAN_DELAY_MSEC));
- dev_dbg(priv->adapter->dev,
- "info: %s: deferring scan\n", __func__);
+ if (scan_info_tlv) {
+ rssi = (s32)(s16)(le16_to_cpu(scan_info_tlv->rssi));
+ rssi *= 100; /* Convert dBm to mBm */
+ dev_dbg(adapter->dev,
+ "info: InterpretIE: RSSI=%d\n", rssi);
+ fw_tsf = le64_to_cpu(scan_info_tlv->tsf);
+ radio_type = &scan_info_tlv->radio_type;
} else {
- /* Get scan command from scan_pending_q and put to
- cmd_pending_q */
- cmd_node = list_first_entry(&adapter->scan_pending_q,
- struct cmd_ctrl_node, list);
- list_del(&cmd_node->list);
- spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
- flags);
- mwifiex_insert_cmd_to_pending_q(adapter, cmd_node,
- true);
+ radio_type = NULL;
}
+ ret = mwifiex_parse_single_response_buf(priv, &bss_info,
+ &bytes_left, fw_tsf,
+ radio_type, true, rssi);
+ if (ret)
+ goto check_next_scan;
}
+check_next_scan:
+ if (!event_scan->more_event)
+ mwifiex_check_next_scan_command(priv);
+
return ret;
}
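
mwifiex_handle_event_ext_scan_report() walks little-endian type/length TLVs and converts the reported RSSI from dBm to mBm via a sign extension. A self-contained sketch of both steps; the TLV bytes in main() are placeholders:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

static uint16_t get_le16(const uint8_t *p)
{
        return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

/* Walk a buffer of { le16 type, le16 len, value[len] } TLVs. */
static void walk_tlvs(const uint8_t *buf, size_t bytes_left)
{
        while (bytes_left >= 4) {
                uint16_t type = get_le16(buf);
                uint16_t len = get_le16(buf + 2);

                if (bytes_left < (size_t)len + 4)
                        break;                  /* truncated TLV */
                printf("TLV type 0x%04x, %u value bytes\n", type, len);
                buf += len + 4;
                bytes_left -= len + 4;
        }
}

/* Sign-extend the scan-info RSSI (dBm) and scale it to mBm, as done above. */
static int32_t rssi_dbm_to_mbm(int16_t rssi_dbm)
{
        return (int32_t)rssi_dbm * 100;
}

int main(void)
{
        const uint8_t tlvs[] = { 0x01, 0x01, 0x02, 0x00, 0xaa, 0xbb };

        walk_tlvs(tlvs, sizeof(tlvs));
        printf("-42 dBm -> %d mBm\n", (int)rssi_dbm_to_mbm(-42));
        return 0;
}
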
diff --git a/drivers/net/wireless/mwifiex/sta_cmd.c b/drivers/net/wireless/mwifiex/sta_cmd.c
index 9208a88..5aa3d39 100644
--- a/drivers/net/wireless/mwifiex/sta_cmd.c
+++ b/drivers/net/wireless/mwifiex/sta_cmd.c
@@ -532,8 +532,228 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
return 0;
}
+/* This function populates the key material v2 command
+ * to set a network key for AES & CMAC AES.
+ */
+static int mwifiex_set_aes_key_v2(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ struct mwifiex_ds_encrypt_key *enc_key,
+ struct host_cmd_ds_802_11_key_material_v2 *km)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u16 size, len = KEY_PARAMS_FIXED_LEN;
+
+ if (enc_key->is_igtk_key) {
+ dev_dbg(adapter->dev, "%s: Set CMAC AES Key\n", __func__);
+ if (enc_key->is_rx_seq_valid)
+ memcpy(km->key_param_set.key_params.cmac_aes.ipn,
+ enc_key->pn, enc_key->pn_len);
+ km->key_param_set.key_info &= cpu_to_le16(~KEY_MCAST);
+ km->key_param_set.key_info |= cpu_to_le16(KEY_IGTK);
+ km->key_param_set.key_type = KEY_TYPE_ID_AES_CMAC;
+ km->key_param_set.key_params.cmac_aes.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.cmac_aes.key,
+ enc_key->key_material, enc_key->key_len);
+ len += sizeof(struct mwifiex_cmac_aes_param);
+ } else {
+ dev_dbg(adapter->dev, "%s: Set AES Key\n", __func__);
+ if (enc_key->is_rx_seq_valid)
+ memcpy(km->key_param_set.key_params.aes.pn,
+ enc_key->pn, enc_key->pn_len);
+ km->key_param_set.key_type = KEY_TYPE_ID_AES;
+ km->key_param_set.key_params.aes.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.aes.key,
+ enc_key->key_material, enc_key->key_len);
+ len += sizeof(struct mwifiex_aes_param);
+ }
+
+ km->key_param_set.len = cpu_to_le16(len);
+ size = len + sizeof(struct mwifiex_ie_types_header) +
+ sizeof(km->action) + S_DS_GEN;
+ cmd->size = cpu_to_le16(size);
+
+ return 0;
+}
+
+/* This function prepares command to set/get/reset network key(s).
+ * This function prepares key material command for V2 format.
+ * Preparation includes -
+ * - Setting command ID, action and proper size
+ * - Setting WEP keys, WAPI keys or WPA keys along with required
+ * encryption (TKIP, AES) (as required)
+ * - Ensuring correct endian-ness
+ */
+static int
+mwifiex_cmd_802_11_key_material_v2(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, u32 cmd_oid,
+ struct mwifiex_ds_encrypt_key *enc_key)
+{
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 *mac = enc_key->mac_addr;
+ u16 key_info, len = KEY_PARAMS_FIXED_LEN;
+ struct host_cmd_ds_802_11_key_material_v2 *km =
+ &cmd->params.key_material_v2;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_802_11_KEY_MATERIAL);
+ km->action = cpu_to_le16(cmd_action);
+
+ if (cmd_action == HostCmd_ACT_GEN_GET) {
+ dev_dbg(adapter->dev, "%s: Get key\n", __func__);
+ km->key_param_set.key_idx =
+ enc_key->key_index & KEY_INDEX_MASK;
+ km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+ km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
+ memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+
+ if (enc_key->key_index & MWIFIEX_KEY_INDEX_UNICAST)
+ key_info = KEY_UNICAST;
+ else
+ key_info = KEY_MCAST;
+
+ if (enc_key->is_igtk_key)
+ key_info |= KEY_IGTK;
+
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ S_DS_GEN + KEY_PARAMS_FIXED_LEN +
+ sizeof(km->action));
+ return 0;
+ }
+
+ memset(&km->key_param_set, 0,
+ sizeof(struct mwifiex_ie_type_key_param_set_v2));
+
+ if (enc_key->key_disable) {
+ dev_dbg(adapter->dev, "%s: Remove key\n", __func__);
+ km->action = cpu_to_le16(HostCmd_ACT_GEN_REMOVE);
+ km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+ km->key_param_set.len = cpu_to_le16(KEY_PARAMS_FIXED_LEN);
+ km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
+ key_info = KEY_MCAST | KEY_UNICAST;
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+ memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ S_DS_GEN + KEY_PARAMS_FIXED_LEN +
+ sizeof(km->action));
+ return 0;
+ }
+
+ km->action = cpu_to_le16(HostCmd_ACT_GEN_SET);
+ km->key_param_set.key_idx = enc_key->key_index & KEY_INDEX_MASK;
+ km->key_param_set.type = cpu_to_le16(TLV_TYPE_KEY_PARAM_V2);
+ key_info = KEY_ENABLED;
+ memcpy(km->key_param_set.mac_addr, mac, ETH_ALEN);
+
+ if (enc_key->key_len <= WLAN_KEY_LEN_WEP104) {
+ dev_dbg(adapter->dev, "%s: Set WEP Key\n", __func__);
+ len += sizeof(struct mwifiex_wep_param);
+ km->key_param_set.len = cpu_to_le16(len);
+ km->key_param_set.key_type = KEY_TYPE_ID_WEP;
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) {
+ key_info |= KEY_MCAST | KEY_UNICAST;
+ } else {
+ if (enc_key->is_current_wep_key) {
+ key_info |= KEY_MCAST | KEY_UNICAST;
+ if (km->key_param_set.key_idx ==
+ (priv->wep_key_curr_index & KEY_INDEX_MASK))
+ key_info |= KEY_DEFAULT;
+ } else {
+ if (mac) {
+ if (is_broadcast_ether_addr(mac))
+ key_info |= KEY_MCAST;
+ else
+ key_info |= KEY_UNICAST |
+ KEY_DEFAULT;
+ } else {
+ key_info |= KEY_MCAST;
+ }
+ }
+ }
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ km->key_param_set.key_params.wep.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.wep.key,
+ enc_key->key_material, enc_key->key_len);
+
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ len + sizeof(km->action) + S_DS_GEN);
+ return 0;
+ }
+
+ if (is_broadcast_ether_addr(mac))
+ key_info |= KEY_MCAST | KEY_RX_KEY;
+ else
+ key_info |= KEY_UNICAST | KEY_TX_KEY | KEY_RX_KEY;
+
+ if (enc_key->is_wapi_key) {
+ dev_dbg(adapter->dev, "%s: Set WAPI Key\n", __func__);
+ km->key_param_set.key_type = KEY_TYPE_ID_WAPI;
+ memcpy(km->key_param_set.key_params.wapi.pn, enc_key->pn,
+ PN_LEN);
+ km->key_param_set.key_params.wapi.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.wapi.key,
+ enc_key->key_material, enc_key->key_len);
+ if (is_broadcast_ether_addr(mac))
+ priv->sec_info.wapi_key_on = true;
+
+ if (!priv->sec_info.wapi_key_on)
+ key_info |= KEY_DEFAULT;
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ len += sizeof(struct mwifiex_wapi_param);
+ km->key_param_set.len = cpu_to_le16(len);
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ len + sizeof(km->action) + S_DS_GEN);
+ return 0;
+ }
+
+ if (priv->bss_mode == NL80211_IFTYPE_ADHOC) {
+ key_info |= KEY_DEFAULT;
+ /* Enable unicast bit for WPA-NONE/ADHOC_AES */
+ if (!priv->sec_info.wpa2_enabled &&
+ !is_broadcast_ether_addr(mac))
+ key_info |= KEY_UNICAST;
+ } else {
+ /* Enable default key for WPA/WPA2 */
+ if (!priv->wpa_is_gtk_set)
+ key_info |= KEY_DEFAULT;
+ }
+
+ km->key_param_set.key_info = cpu_to_le16(key_info);
+
+ if (enc_key->key_len == WLAN_KEY_LEN_CCMP)
+ return mwifiex_set_aes_key_v2(priv, cmd, enc_key, km);
+
+ if (enc_key->key_len == WLAN_KEY_LEN_TKIP) {
+ dev_dbg(adapter->dev, "%s: Set TKIP Key\n", __func__);
+ if (enc_key->is_rx_seq_valid)
+ memcpy(km->key_param_set.key_params.tkip.pn,
+ enc_key->pn, enc_key->pn_len);
+ km->key_param_set.key_type = KEY_TYPE_ID_TKIP;
+ km->key_param_set.key_params.tkip.key_len =
+ cpu_to_le16(enc_key->key_len);
+ memcpy(km->key_param_set.key_params.tkip.key,
+ enc_key->key_material, enc_key->key_len);
+
+ len += sizeof(struct mwifiex_tkip_param);
+ km->key_param_set.len = cpu_to_le16(len);
+ cmd->size = cpu_to_le16(sizeof(struct mwifiex_ie_types_header) +
+ len + sizeof(km->action) + S_DS_GEN);
+ }
+
+ return 0;
+}
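
The least obvious part of mwifiex_cmd_802_11_key_material_v2() is how the WEP key_info bitmask is chosen for the AP role, the currently active WEP key, and unicast versus multicast peers. A pure-function sketch of that decision tree; the KEY_* bit values below are hypothetical and only the branch structure mirrors the code above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit values -- the real KEY_* flags live in the driver headers. */
#define KEY_ENABLED 0x01
#define KEY_UNICAST 0x02
#define KEY_MCAST   0x04
#define KEY_DEFAULT 0x08

static uint16_t wep_key_info(bool is_uap, bool is_current_wep_key,
                             bool key_idx_is_current, bool have_mac,
                             bool mac_is_broadcast)
{
        uint16_t key_info = KEY_ENABLED;

        if (is_uap) {
                key_info |= KEY_MCAST | KEY_UNICAST;
        } else if (is_current_wep_key) {
                key_info |= KEY_MCAST | KEY_UNICAST;
                if (key_idx_is_current)
                        key_info |= KEY_DEFAULT;
        } else if (have_mac) {
                key_info |= mac_is_broadcast ? KEY_MCAST
                                             : (KEY_UNICAST | KEY_DEFAULT);
        } else {
                key_info |= KEY_MCAST;
        }
        return key_info;
}

int main(void)
{
        printf("STA, current WEP key at default index: 0x%04x\n",
               (unsigned)wep_key_info(false, true, true, false, false));
        return 0;
}
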
+
/*
* This function prepares command to set/get/reset network key(s).
+ * This function prepares key material command for V1 format.
*
* Preparation includes -
* - Setting command ID, action and proper size
@@ -542,10 +762,10 @@ mwifiex_set_keyparamset_wep(struct mwifiex_private *priv,
* - Ensuring correct endian-ness
*/
static int
-mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
- struct host_cmd_ds_command *cmd,
- u16 cmd_action, u32 cmd_oid,
- struct mwifiex_ds_encrypt_key *enc_key)
+mwifiex_cmd_802_11_key_material_v1(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, u32 cmd_oid,
+ struct mwifiex_ds_encrypt_key *enc_key)
{
struct host_cmd_ds_802_11_key_material *key_material =
&cmd->params.key_material;
@@ -724,6 +944,24 @@ mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
return ret;
}
+/* Wrapper function for setting network key depending upon FW KEY API version */
+static int
+mwifiex_cmd_802_11_key_material(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ u16 cmd_action, u32 cmd_oid,
+ struct mwifiex_ds_encrypt_key *enc_key)
+{
+ if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+ return mwifiex_cmd_802_11_key_material_v2(priv, cmd,
+ cmd_action, cmd_oid,
+ enc_key);
+
+ else
+ return mwifiex_cmd_802_11_key_material_v1(priv, cmd,
+ cmd_action, cmd_oid,
+ enc_key);
+}
+
/*
* This function prepares command to set/get 11d domain information.
*
@@ -1280,6 +1518,127 @@ mwifiex_cmd_coalesce_cfg(struct mwifiex_private *priv,
return 0;
}
+static int
+mwifiex_cmd_tdls_oper(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *cmd,
+ void *data_buf)
+{
+ struct host_cmd_ds_tdls_oper *tdls_oper = &cmd->params.tdls_oper;
+ struct mwifiex_ds_tdls_oper *oper = data_buf;
+ struct mwifiex_sta_node *sta_ptr;
+ struct host_cmd_tlv_rates *tlv_rates;
+ struct mwifiex_ie_types_htcap *ht_capab;
+ struct mwifiex_ie_types_qos_info *wmm_qos_info;
+ struct mwifiex_ie_types_extcap *extcap;
+ struct mwifiex_ie_types_vhtcap *vht_capab;
+ struct mwifiex_ie_types_aid *aid;
+ u8 *pos, qos_info;
+ u16 config_len = 0;
+ struct station_parameters *params = priv->sta_params;
+
+ cmd->command = cpu_to_le16(HostCmd_CMD_TDLS_OPER);
+ cmd->size = cpu_to_le16(S_DS_GEN);
+ le16_add_cpu(&cmd->size, sizeof(struct host_cmd_ds_tdls_oper));
+
+ tdls_oper->reason = 0;
+ memcpy(tdls_oper->peer_mac, oper->peer_mac, ETH_ALEN);
+ sta_ptr = mwifiex_get_sta_entry(priv, oper->peer_mac);
+
+ pos = (u8 *)tdls_oper + sizeof(struct host_cmd_ds_tdls_oper);
+
+ switch (oper->tdls_action) {
+ case MWIFIEX_TDLS_DISABLE_LINK:
+ tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_DELETE);
+ break;
+ case MWIFIEX_TDLS_CREATE_LINK:
+ tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CREATE);
+ break;
+ case MWIFIEX_TDLS_CONFIG_LINK:
+ tdls_oper->tdls_action = cpu_to_le16(ACT_TDLS_CONFIG);
+
+ if (!params) {
+ dev_err(priv->adapter->dev,
+ "TDLS config params not available for %pM\n",
+ oper->peer_mac);
+ return -ENODATA;
+ }
+
+ *(__le16 *)pos = cpu_to_le16(params->capability);
+ config_len += sizeof(params->capability);
+
+ qos_info = params->uapsd_queues | (params->max_sp << 5);
+ wmm_qos_info = (struct mwifiex_ie_types_qos_info *)(pos +
+ config_len);
+ wmm_qos_info->header.type = cpu_to_le16(WLAN_EID_QOS_CAPA);
+ wmm_qos_info->header.len = cpu_to_le16(sizeof(qos_info));
+ wmm_qos_info->qos_info = qos_info;
+ config_len += sizeof(struct mwifiex_ie_types_qos_info);
+
+ if (params->ht_capa) {
+ ht_capab = (struct mwifiex_ie_types_htcap *)(pos +
+ config_len);
+ ht_capab->header.type =
+ cpu_to_le16(WLAN_EID_HT_CAPABILITY);
+ ht_capab->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_ht_cap));
+ memcpy(&ht_capab->ht_cap, params->ht_capa,
+ sizeof(struct ieee80211_ht_cap));
+ config_len += sizeof(struct mwifiex_ie_types_htcap);
+ }
+
+ if (params->supported_rates && params->supported_rates_len) {
+ tlv_rates = (struct host_cmd_tlv_rates *)(pos +
+ config_len);
+ tlv_rates->header.type =
+ cpu_to_le16(WLAN_EID_SUPP_RATES);
+ tlv_rates->header.len =
+ cpu_to_le16(params->supported_rates_len);
+ memcpy(tlv_rates->rates, params->supported_rates,
+ params->supported_rates_len);
+ config_len += sizeof(struct host_cmd_tlv_rates) +
+ params->supported_rates_len;
+ }
+
+ if (params->ext_capab && params->ext_capab_len) {
+ extcap = (struct mwifiex_ie_types_extcap *)(pos +
+ config_len);
+ extcap->header.type =
+ cpu_to_le16(WLAN_EID_EXT_CAPABILITY);
+ extcap->header.len = cpu_to_le16(params->ext_capab_len);
+ memcpy(extcap->ext_capab, params->ext_capab,
+ params->ext_capab_len);
+ config_len += sizeof(struct mwifiex_ie_types_extcap) +
+ params->ext_capab_len;
+ }
+ if (params->vht_capa) {
+ vht_capab = (struct mwifiex_ie_types_vhtcap *)(pos +
+ config_len);
+ vht_capab->header.type =
+ cpu_to_le16(WLAN_EID_VHT_CAPABILITY);
+ vht_capab->header.len =
+ cpu_to_le16(sizeof(struct ieee80211_vht_cap));
+ memcpy(&vht_capab->vht_cap, params->vht_capa,
+ sizeof(struct ieee80211_vht_cap));
+ config_len += sizeof(struct mwifiex_ie_types_vhtcap);
+ }
+ if (params->aid) {
+ aid = (struct mwifiex_ie_types_aid *)(pos + config_len);
+ aid->header.type = cpu_to_le16(WLAN_EID_AID);
+ aid->header.len = cpu_to_le16(sizeof(params->aid));
+ aid->aid = cpu_to_le16(params->aid);
+ config_len += sizeof(struct mwifiex_ie_types_aid);
+ }
+
+ break;
+ default:
+ dev_err(priv->adapter->dev, "Unknown TDLS operation\n");
+ return -ENOTSUPP;
+ }
+
+ le16_add_cpu(&cmd->size, config_len);
+
+ return 0;
+}
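
mwifiex_cmd_tdls_oper() builds the CONFIG payload by appending variable-length elements one after another while tracking config_len, then grows the command size with le16_add_cpu(). A standalone sketch of that append-and-account pattern for the capability field and the QoS info TLV; the capability and QoS values in main() are examples:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WLAN_EID_QOS_CAPA 46    /* 802.11 QoS Capability element ID */

static void put_le16(uint8_t *p, uint16_t v)
{
        p[0] = v & 0xff;
        p[1] = v >> 8;
}

/* Append the capability field plus a { le16 type, le16 len, u8 } QoS info TLV. */
static size_t build_tdls_config(uint8_t *pos, uint16_t capability, uint8_t qos_info)
{
        size_t config_len = 0;

        put_le16(pos, capability);              /* little-endian capability field */
        config_len += sizeof(capability);

        put_le16(pos + config_len, WLAN_EID_QOS_CAPA);
        put_le16(pos + config_len + 2, sizeof(qos_info));
        pos[config_len + 4] = qos_info;
        config_len += 4 + sizeof(qos_info);

        return config_len;      /* caller adds this to the command size */
}

int main(void)
{
        uint8_t buf[32];

        printf("CONFIG payload: %zu bytes\n", build_tdls_config(buf, 0x0431, 0x0f));
        return 0;
}
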
/*
* This function prepares the commands before sending them to the firmware.
*
@@ -1472,6 +1831,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_ibss_coalescing_status(cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_802_11_SCAN_EXT:
+ ret = mwifiex_cmd_802_11_scan_ext(priv, cmd_ptr, data_buf);
+ break;
case HostCmd_CMD_MAC_REG_ACCESS:
case HostCmd_CMD_BBP_REG_ACCESS:
case HostCmd_CMD_RF_REG_ACCESS:
@@ -1507,6 +1869,9 @@ int mwifiex_sta_prepare_cmd(struct mwifiex_private *priv, uint16_t cmd_no,
ret = mwifiex_cmd_coalesce_cfg(priv, cmd_ptr, cmd_action,
data_buf);
break;
+ case HostCmd_CMD_TDLS_OPER:
+ ret = mwifiex_cmd_tdls_oper(priv, cmd_ptr, data_buf);
+ break;
default:
dev_err(priv->adapter->dev,
"PREP_CMD: unknown cmd- %#x\n", cmd_no);
diff --git a/drivers/net/wireless/mwifiex/sta_cmdresp.c b/drivers/net/wireless/mwifiex/sta_cmdresp.c
index 24523e4..1c5e188 100644
--- a/drivers/net/wireless/mwifiex/sta_cmdresp.c
+++ b/drivers/net/wireless/mwifiex/sta_cmdresp.c
@@ -69,6 +69,7 @@ mwifiex_process_cmdresp_error(struct mwifiex_private *priv,
break;
case HostCmd_CMD_802_11_SCAN:
+ case HostCmd_CMD_802_11_SCAN_EXT:
/* Cancel all pending scan command */
spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
list_for_each_entry_safe(cmd_node, tmp_node,
@@ -561,13 +562,13 @@ static int mwifiex_ret_802_11_ad_hoc_stop(struct mwifiex_private *priv,
}
/*
- * This function handles the command response of set/get key material.
+ * This function handles the command response of set/get v1 key material.
*
* Handling includes updating the driver parameters to reflect the
* changes.
*/
-static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
- struct host_cmd_ds_command *resp)
+static int mwifiex_ret_802_11_key_material_v1(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
{
struct host_cmd_ds_802_11_key_material *key =
&resp->params.key_material;
@@ -590,6 +591,51 @@ static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
}
/*
+ * This function handles the command response of set/get v2 key material.
+ *
+ * Handling includes updating the driver parameters to reflect the
+ * changes.
+ */
+static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_802_11_key_material_v2 *key_v2;
+ __le16 len;
+
+ key_v2 = &resp->params.key_material_v2;
+ if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) {
+ if ((le16_to_cpu(key_v2->key_param_set.key_info) & KEY_MCAST)) {
+ dev_dbg(priv->adapter->dev, "info: key: GTK is set\n");
+ priv->wpa_is_gtk_set = true;
+ priv->scan_block = false;
+ }
+ }
+
+ if (key_v2->key_param_set.key_type != KEY_TYPE_ID_AES)
+ return 0;
+
+ memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0,
+ WLAN_KEY_LEN_CCMP);
+ priv->aes_key_v2.key_param_set.key_params.aes.key_len =
+ key_v2->key_param_set.key_params.aes.key_len;
+ len = priv->aes_key_v2.key_param_set.key_params.aes.key_len;
+ memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key,
+ key_v2->key_param_set.key_params.aes.key, le16_to_cpu(len));
+
+ return 0;
+}
+
+/* Wrapper function for processing response of key material command */
+static int mwifiex_ret_802_11_key_material(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ if (priv->adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+ return mwifiex_ret_802_11_key_material_v2(priv, resp);
+ else
+ return mwifiex_ret_802_11_key_material_v1(priv, resp);
+}
+
+/*
* This function handles the command response of get 11d domain information.
*/
static int mwifiex_ret_802_11d_domain_info(struct mwifiex_private *priv,
@@ -800,7 +846,60 @@ static int mwifiex_ret_ibss_coalescing_status(struct mwifiex_private *priv,
return 0;
}
+static int mwifiex_ret_tdls_oper(struct mwifiex_private *priv,
+ struct host_cmd_ds_command *resp)
+{
+ struct host_cmd_ds_tdls_oper *cmd_tdls_oper = &resp->params.tdls_oper;
+ u16 reason = le16_to_cpu(cmd_tdls_oper->reason);
+ u16 action = le16_to_cpu(cmd_tdls_oper->tdls_action);
+ struct mwifiex_sta_node *node =
+ mwifiex_get_sta_entry(priv, cmd_tdls_oper->peer_mac);
+ switch (action) {
+ case ACT_TDLS_DELETE:
+ if (reason)
+ dev_err(priv->adapter->dev,
+ "TDLS link delete for %pM failed: reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
+ else
+ dev_dbg(priv->adapter->dev,
+ "TDLS link config for %pM successful\n",
+ cmd_tdls_oper->peer_mac);
+ break;
+ case ACT_TDLS_CREATE:
+ if (reason) {
+ dev_err(priv->adapter->dev,
+ "TDLS link creation for %pM failed: reason %d",
+ cmd_tdls_oper->peer_mac, reason);
+ if (node && reason != TDLS_ERR_LINK_EXISTS)
+ node->tdls_status = TDLS_SETUP_FAILURE;
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "TDLS link creation for %pM successful",
+ cmd_tdls_oper->peer_mac);
+ }
+ break;
+ case ACT_TDLS_CONFIG:
+ if (reason) {
+ dev_err(priv->adapter->dev,
+ "TDLS link config for %pM failed, reason %d\n",
+ cmd_tdls_oper->peer_mac, reason);
+ if (node)
+ node->tdls_status = TDLS_SETUP_FAILURE;
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "TDLS link config for %pM successful\n",
+ cmd_tdls_oper->peer_mac);
+ }
+ break;
+ default:
+ dev_err(priv->adapter->dev,
+ "Unknown TDLS command action respnse %d", action);
+ return -1;
+ }
+
+ return 0;
+}
/*
* This function handles the command response for subscribe event command.
*/
@@ -871,6 +970,10 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
ret = mwifiex_ret_802_11_scan(priv, resp);
adapter->curr_cmd->wait_q_enabled = false;
break;
+ case HostCmd_CMD_802_11_SCAN_EXT:
+ ret = mwifiex_ret_802_11_scan_ext(priv);
+ adapter->curr_cmd->wait_q_enabled = false;
+ break;
case HostCmd_CMD_802_11_BG_SCAN_QUERY:
ret = mwifiex_ret_802_11_scan(priv, resp);
dev_dbg(adapter->dev,
@@ -999,6 +1102,9 @@ int mwifiex_process_sta_cmdresp(struct mwifiex_private *priv, u16 cmdresp_no,
break;
case HostCmd_CMD_COALESCE_CFG:
break;
+ case HostCmd_CMD_TDLS_OPER:
+ ret = mwifiex_ret_tdls_oper(priv, resp);
+ break;
default:
dev_err(adapter->dev, "CMD_RESP: unknown cmd response %#x\n",
resp->command);
diff --git a/drivers/net/wireless/mwifiex/sta_event.c b/drivers/net/wireless/mwifiex/sta_event.c
index 8c351f7..92ff7b3 100644
--- a/drivers/net/wireless/mwifiex/sta_event.c
+++ b/drivers/net/wireless/mwifiex/sta_event.c
@@ -54,6 +54,10 @@ mwifiex_reset_connect_state(struct mwifiex_private *priv, u16 reason_code)
priv->scan_block = false;
+ if ((GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA) &&
+ ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info))
+ mwifiex_disable_all_tdls_links(priv);
+
/* Free Tx and Rx packets, report disconnect to upper layer */
mwifiex_clean_txrx(priv);
@@ -331,6 +335,14 @@ int mwifiex_process_sta_event(struct mwifiex_private *priv)
dev_dbg(adapter->dev, "event: PORT RELEASE\n");
break;
+ case EVENT_EXT_SCAN_REPORT:
+ dev_dbg(adapter->dev, "event: EXT_SCAN Report\n");
+ if (adapter->ext_scan)
+ ret = mwifiex_handle_event_ext_scan_report(priv,
+ adapter->event_skb->data);
+
+ break;
+
case EVENT_WMM_STATUS_CHANGE:
dev_dbg(adapter->dev, "event: WMM status changed\n");
ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_WMM_GET_STATUS,
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index c5cb2ed..b393d55 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -290,7 +290,7 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
if (mwifiex_band_to_radio_type(bss_desc->bss_band) ==
HostCmd_SCAN_RADIO_TYPE_BG)
- config_bands = BAND_B | BAND_G | BAND_GN | BAND_GAC;
+ config_bands = BAND_B | BAND_G | BAND_GN;
else
config_bands = BAND_A | BAND_AN | BAND_AAC;
@@ -865,6 +865,7 @@ static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv,
static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
struct mwifiex_ds_encrypt_key *encrypt_key)
{
+ struct mwifiex_adapter *adapter = priv->adapter;
int ret;
struct mwifiex_wep_key *wep_key;
int index;
@@ -879,10 +880,17 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
/* Copy the required key as the current key */
wep_key = &priv->wep_key[index];
if (!wep_key->key_length) {
- dev_err(priv->adapter->dev,
+ dev_err(adapter->dev,
"key not set, so cannot enable it\n");
return -1;
}
+
+ if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2) {
+ memcpy(encrypt_key->key_material,
+ wep_key->key_material, wep_key->key_length);
+ encrypt_key->key_len = wep_key->key_length;
+ }
+
priv->wep_key_curr_index = (u16) index;
priv->sec_info.wep_enabled = 1;
} else {
@@ -897,13 +905,25 @@ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv,
priv->sec_info.wep_enabled = 1;
}
if (wep_key->key_length) {
+ void *enc_key;
+
+ if (encrypt_key->key_disable)
+ memset(&priv->wep_key[index], 0,
+ sizeof(struct mwifiex_wep_key));
+
+ if (adapter->fw_key_api_major_ver == FW_KEY_API_VER_MAJOR_V2)
+ enc_key = encrypt_key;
+ else
+ enc_key = NULL;
+
/* Send request to firmware */
ret = mwifiex_send_cmd_async(priv,
HostCmd_CMD_802_11_KEY_MATERIAL,
- HostCmd_ACT_GEN_SET, 0, NULL);
+ HostCmd_ACT_GEN_SET, 0, enc_key);
if (ret)
return ret;
}
+
if (priv->sec_info.wep_enabled)
priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE;
else
@@ -1044,19 +1064,27 @@ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp,
memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key));
encrypt_key.key_len = key_len;
+ encrypt_key.key_index = key_index;
if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
encrypt_key.is_igtk_key = true;
if (!disable) {
- encrypt_key.key_index = key_index;
if (key_len)
memcpy(encrypt_key.key_material, key, key_len);
+ else
+ encrypt_key.is_current_wep_key = true;
+
if (mac_addr)
memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
- if (kp && kp->seq && kp->seq_len)
+ if (kp && kp->seq && kp->seq_len) {
memcpy(encrypt_key.pn, kp->seq, kp->seq_len);
+ encrypt_key.pn_len = kp->seq_len;
+ encrypt_key.is_rx_seq_valid = true;
+ }
} else {
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP)
+ return 0;
encrypt_key.key_disable = true;
if (mac_addr)
memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN);
@@ -1391,7 +1419,7 @@ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv,
* with requisite parameters and calls the IOCTL handler.
*/
int
-mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len)
+mwifiex_set_gen_ie(struct mwifiex_private *priv, const u8 *ie, int ie_len)
{
struct mwifiex_ds_misc_gen_ie gen_ie;
diff --git a/drivers/net/wireless/mwifiex/sta_rx.c b/drivers/net/wireless/mwifiex/sta_rx.c
index 4651d67..b6aa958 100644
--- a/drivers/net/wireless/mwifiex/sta_rx.c
+++ b/drivers/net/wireless/mwifiex/sta_rx.c
@@ -88,11 +88,14 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
struct rxpd *local_rx_pd;
int hdr_chop;
struct ethhdr *eth;
+ u16 rx_pkt_off, rx_pkt_len;
+ u8 *offset;
local_rx_pd = (struct rxpd *) (skb->data);
- rx_pkt_hdr = (void *)local_rx_pd +
- le16_to_cpu(local_rx_pd->rx_pkt_offset);
+ rx_pkt_off = le16_to_cpu(local_rx_pd->rx_pkt_offset);
+ rx_pkt_len = le16_to_cpu(local_rx_pd->rx_pkt_length);
+ rx_pkt_hdr = (void *)local_rx_pd + rx_pkt_off;
if ((!memcmp(&rx_pkt_hdr->rfc1042_hdr, bridge_tunnel_header,
sizeof(bridge_tunnel_header))) ||
@@ -142,6 +145,12 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv,
return 0;
}
+ if (ISSUPP_TDLS_ENABLED(priv->adapter->fw_cap_info) &&
+ ntohs(rx_pkt_hdr->eth803_hdr.h_proto) == ETH_P_TDLS) {
+ offset = (u8 *)local_rx_pd + rx_pkt_off;
+ mwifiex_process_tdls_action_frame(priv, offset, rx_pkt_len);
+ }
+
priv->rxpd_rate = local_rx_pd->rx_rate;
priv->rxpd_htinfo = local_rx_pd->ht_info;
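
The new sta_rx.c check classifies received frames by the TDLS EtherType (0x890d) in the 802.3 header before handing them to the TDLS action-frame parser. A self-contained sketch of that classification; the sample frame bytes are made up:

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN   6
#define ETH_P_TDLS 0x890d       /* TDLS payload EtherType */

/* Return nonzero if the 802.3 frame's EtherType marks a TDLS payload. */
static int is_tdls_frame(const uint8_t *frame, unsigned int len)
{
        uint16_t proto;

        if (len < 2 * ETH_ALEN + 2)
                return 0;
        proto = ((uint16_t)frame[2 * ETH_ALEN] << 8) | frame[2 * ETH_ALEN + 1];
        return proto == ETH_P_TDLS;             /* EtherType is big-endian on the wire */
}

int main(void)
{
        uint8_t frame[16] = { 0 };

        frame[12] = 0x89;
        frame[13] = 0x0d;
        printf("TDLS frame: %s\n", is_tdls_frame(frame, sizeof(frame)) ? "yes" : "no");
        return 0;
}
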
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 354d64c..1236a5d 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -95,6 +95,9 @@ void *mwifiex_process_sta_txpd(struct mwifiex_private *priv,
}
}
+ if (tx_info->flags & MWIFIEX_BUF_FLAG_TDLS_PKT)
+ local_tx_pd->flags |= MWIFIEX_TXPD_FLAGS_TDLS_PACKET;
+
/* Offset of actual data */
pkt_offset = sizeof(struct txpd) + pad;
if (pkt_type == PKT_TYPE_MGMT) {
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
new file mode 100644
index 0000000..5efd456
--- /dev/null
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -0,0 +1,1044 @@
+/* Marvell Wireless LAN device driver: TDLS handling
+ *
+ * Copyright (C) 2014, Marvell International Ltd.
+ *
+ * This software file (the "File") is distributed by Marvell International
+ * Ltd. under the terms of the GNU General Public License Version 2, June 1991
+ * (the "License"). You may use, redistribute and/or modify this File in
+ * accordance with the terms and conditions of the License, a copy of which
+ * is available on the worldwide web at
+ * http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
+ *
+ * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
+ * ARE EXPRESSLY DISCLAIMED. The License provides additional details about
+ * this warranty disclaimer.
+ */
+
+#include "main.h"
+#include "wmm.h"
+#include "11n.h"
+#include "11n_rxreorder.h"
+#include "11ac.h"
+
+#define TDLS_REQ_FIX_LEN 6
+#define TDLS_RESP_FIX_LEN 8
+#define TDLS_CONFIRM_FIX_LEN 6
+
+static void
+mwifiex_restore_tdls_packets(struct mwifiex_private *priv, u8 *mac, u8 status)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ struct list_head *tid_list;
+ struct sk_buff *skb, *tmp;
+ struct mwifiex_txinfo *tx_info;
+ unsigned long flags;
+ u32 tid;
+ u8 tid_down;
+
+ dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
+ if (!ether_addr_equal(mac, skb->data))
+ continue;
+
+ __skb_unlink(skb, &priv->tdls_txq);
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tid = skb->priority;
+ tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
+
+ if (status == TDLS_SETUP_COMPLETE) {
+ ra_list = mwifiex_wmm_get_queue_raptr(priv, tid, mac);
+ ra_list->tdls_link = true;
+ tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+ } else {
+ tid_list = &priv->wmm.tid_tbl_ptr[tid_down].ra_list;
+ if (!list_empty(tid_list))
+ ra_list = list_first_entry(tid_list,
+ struct mwifiex_ra_list_tbl, list);
+ else
+ ra_list = NULL;
+ tx_info->flags &= ~MWIFIEX_BUF_FLAG_TDLS_PKT;
+ }
+
+ if (!ra_list) {
+ mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
+ continue;
+ }
+
+ skb_queue_tail(&ra_list->skb_head, skb);
+
+ ra_list->ba_pkt_count++;
+ ra_list->total_pkt_count++;
+
+ if (atomic_read(&priv->wmm.highest_queued_prio) <
+ tos_to_tid_inv[tid_down])
+ atomic_set(&priv->wmm.highest_queued_prio,
+ tos_to_tid_inv[tid_down]);
+
+ atomic_inc(&priv->wmm.tx_pkts_queued);
+ }
+
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ return;
+}
+
+static void mwifiex_hold_tdls_packets(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_ra_list_tbl *ra_list;
+ struct list_head *ra_list_head;
+ struct sk_buff *skb, *tmp;
+ unsigned long flags;
+ int i;
+
+ dev_dbg(priv->adapter->dev, "%s: %pM\n", __func__, mac);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
+
+ for (i = 0; i < MAX_NUM_TID; i++) {
+ if (!list_empty(&priv->wmm.tid_tbl_ptr[i].ra_list)) {
+ ra_list_head = &priv->wmm.tid_tbl_ptr[i].ra_list;
+ list_for_each_entry(ra_list, ra_list_head, list) {
+ skb_queue_walk_safe(&ra_list->skb_head, skb,
+ tmp) {
+ if (!ether_addr_equal(mac, skb->data))
+ continue;
+ __skb_unlink(skb, &ra_list->skb_head);
+ atomic_dec(&priv->wmm.tx_pkts_queued);
+ ra_list->total_pkt_count--;
+ skb_queue_tail(&priv->tdls_txq, skb);
+ }
+ }
+ }
+ }
+
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+ return;
+}
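
Both queue helpers above identify a peer's packets by comparing the first six bytes of skb->data, which for an 802.3 frame is the destination MAC. A tiny standalone illustration of that match:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

/* Same test as ether_addr_equal(mac, skb->data): compare the destination MAC. */
static int dest_mac_matches(const uint8_t *frame, const uint8_t *mac)
{
        return memcmp(frame, mac, ETH_ALEN) == 0;
}

int main(void)
{
        const uint8_t peer[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        const uint8_t frame[ETH_ALEN + 8] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        printf("queued for peer: %s\n", dest_mac_matches(frame, peer) ? "yes" : "no");
        return 0;
}
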
+
+/* This function appends rates IEs to a TDLS action frame. */
+static int
+mwifiex_tdls_append_rates_ie(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ u8 rates[MWIFIEX_SUPPORTED_RATES], *pos;
+ u16 rates_size, supp_rates_size, ext_rates_size;
+
+ memset(rates, 0, sizeof(rates));
+ rates_size = mwifiex_get_supported_rates(priv, rates);
+
+ supp_rates_size = min_t(u16, rates_size, MWIFIEX_TDLS_SUPPORTED_RATES);
+
+ if (skb_tailroom(skb) < rates_size + 4) {
+ dev_err(priv->adapter->dev,
+ "Insuffient space while adding rates\n");
+ return -ENOMEM;
+ }
+
+ pos = skb_put(skb, supp_rates_size + 2);
+ *pos++ = WLAN_EID_SUPP_RATES;
+ *pos++ = supp_rates_size;
+ memcpy(pos, rates, supp_rates_size);
+
+ if (rates_size > MWIFIEX_TDLS_SUPPORTED_RATES) {
+ ext_rates_size = rates_size - MWIFIEX_TDLS_SUPPORTED_RATES;
+ pos = skb_put(skb, ext_rates_size + 2);
+ *pos++ = WLAN_EID_EXT_SUPP_RATES;
+ *pos++ = ext_rates_size;
+ memcpy(pos, rates + MWIFIEX_TDLS_SUPPORTED_RATES,
+ ext_rates_size);
+ }
+
+ return 0;
+}
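
mwifiex_tdls_append_rates_ie() splits the rate set across a Supported Rates IE (at most eight rates) and an Extended Supported Rates IE for the remainder, as 802.11 requires. A standalone sketch of that split; the rate values in main() are just example encodings in 500 kbps units:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define WLAN_EID_SUPP_RATES     1
#define WLAN_EID_EXT_SUPP_RATES 50
#define MAX_RATES_IN_SUPP_IE    8

/* Emit a Supported Rates IE and, if needed, an Extended Supported Rates IE. */
static size_t append_rates(uint8_t *pos, const uint8_t *rates, uint8_t n)
{
        uint8_t supp = n < MAX_RATES_IN_SUPP_IE ? n : MAX_RATES_IN_SUPP_IE;
        size_t len = 0;

        pos[len++] = WLAN_EID_SUPP_RATES;
        pos[len++] = supp;
        memcpy(pos + len, rates, supp);
        len += supp;

        if (n > supp) {
                pos[len++] = WLAN_EID_EXT_SUPP_RATES;
                pos[len++] = n - supp;
                memcpy(pos + len, rates + supp, n - supp);
                len += n - supp;
        }
        return len;
}

int main(void)
{
        const uint8_t rates[] = { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12,
                                  0x18, 0x24, 0x30, 0x48, 0x60, 0x6c };
        uint8_t buf[64];

        printf("rates IEs occupy %zu bytes\n",
               append_rates(buf, rates, sizeof(rates)));
        return 0;
}
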
+
+static void mwifiex_tdls_add_aid(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ieee_types_assoc_rsp *assoc_rsp;
+ u8 *pos;
+
+ assoc_rsp = (struct ieee_types_assoc_rsp *)&priv->assoc_rsp_buf;
+ pos = (void *)skb_put(skb, 4);
+ *pos++ = WLAN_EID_AID;
+ *pos++ = 2;
+ *pos++ = le16_to_cpu(assoc_rsp->a_id) & 0xff;
+ *pos = le16_to_cpu(assoc_rsp->a_id) >> 8;
+
+ return;
+}
+
+static int mwifiex_tdls_add_vht_capab(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ieee80211_vht_cap vht_cap;
+ u8 *pos;
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
+ *pos++ = WLAN_EID_VHT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_vht_cap);
+
+ memset(&vht_cap, 0, sizeof(struct ieee80211_vht_cap));
+
+ mwifiex_fill_vht_cap_tlv(priv, &vht_cap, priv->curr_bss_params.band);
+ memcpy(pos, &vht_cap, sizeof(struct ieee80211_vht_cap));
+
+ return 0;
+}
+
+static int mwifiex_tdls_add_vht_oper(struct mwifiex_private *priv,
+ u8 *mac, struct sk_buff *skb)
+{
+ struct mwifiex_bssdescriptor *bss_desc;
+ struct ieee80211_vht_operation *vht_oper;
+ struct ieee80211_vht_cap *vht_cap, *ap_vht_cap = NULL;
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_adapter *adapter = priv->adapter;
+ u8 supp_chwd_set, peer_supp_chwd_set;
+ u8 *pos, ap_supp_chwd_set, chan_bw;
+ u16 mcs_map_user, mcs_map_resp, mcs_map_result;
+ u16 mcs_user, mcs_resp, nss;
+ u32 usr_vht_cap_info;
+
+ bss_desc = &priv->curr_bss_params.bss_descriptor;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, mac);
+ if (unlikely(!sta_ptr)) {
+ dev_warn(adapter->dev, "TDLS peer station not found in list\n");
+ return -1;
+ }
+
+ if (!mwifiex_is_bss_in_11ac_mode(priv)) {
+ if (sta_ptr->tdls_cap.extcap.ext_capab[7] &
+ WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
+ dev_dbg(adapter->dev,
+ "TDLS peer doesn't support wider bandwitdh\n");
+ return 0;
+ }
+ } else {
+ ap_vht_cap = bss_desc->bcn_vht_cap;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_vht_operation) + 2);
+ *pos++ = WLAN_EID_VHT_OPERATION;
+ *pos++ = sizeof(struct ieee80211_vht_operation);
+ vht_oper = (struct ieee80211_vht_operation *)pos;
+
+ if (bss_desc->bss_band & BAND_A)
+ usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_a;
+ else
+ usr_vht_cap_info = adapter->usr_dot_11ac_dev_cap_bg;
+
+ /* find the minimum bandwidth between the AP and the TDLS peer */
+ vht_cap = &sta_ptr->tdls_cap.vhtcap;
+ supp_chwd_set = GET_VHTCAP_CHWDSET(usr_vht_cap_info);
+ peer_supp_chwd_set =
+ GET_VHTCAP_CHWDSET(le32_to_cpu(vht_cap->vht_cap_info));
+ supp_chwd_set = min_t(u8, supp_chwd_set, peer_supp_chwd_set);
+
+ /* We need to check the AP's bandwidth when TDLS_WIDER_BANDWIDTH is off */
+
+ if (ap_vht_cap && sta_ptr->tdls_cap.extcap.ext_capab[7] &
+ WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED) {
+ ap_supp_chwd_set =
+ GET_VHTCAP_CHWDSET(le32_to_cpu(ap_vht_cap->vht_cap_info));
+ supp_chwd_set = min_t(u8, supp_chwd_set, ap_supp_chwd_set);
+ }
+
+ switch (supp_chwd_set) {
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_160MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_80P80MHZ;
+ break;
+ default:
+ vht_oper->chan_width = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+
+ mcs_map_user = GET_DEVRXMCSMAP(adapter->usr_dot_11ac_mcs_support);
+ mcs_map_resp = le16_to_cpu(vht_cap->supp_mcs.rx_mcs_map);
+ mcs_map_result = 0;
+
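+ /* Use the lowest MCS supported by both sides for each spatial stream. */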
+ for (nss = 1; nss <= 8; nss++) {
+ mcs_user = GET_VHTNSSMCS(mcs_map_user, nss);
+ mcs_resp = GET_VHTNSSMCS(mcs_map_resp, nss);
+
+ if ((mcs_user == IEEE80211_VHT_MCS_NOT_SUPPORTED) ||
+ (mcs_resp == IEEE80211_VHT_MCS_NOT_SUPPORTED))
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ IEEE80211_VHT_MCS_NOT_SUPPORTED);
+ else
+ SET_VHTNSSMCS(mcs_map_result, nss,
+ min_t(u16, mcs_user, mcs_resp));
+ }
+
+ vht_oper->basic_mcs_set = cpu_to_le16(mcs_map_result);
+
+ switch (vht_oper->chan_width) {
+ case IEEE80211_VHT_CHANWIDTH_80MHZ:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_160MHZ:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_160MHZ;
+ break;
+ case IEEE80211_VHT_CHANWIDTH_80P80MHZ:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_80MHZ;
+ break;
+ default:
+ chan_bw = IEEE80211_VHT_CHANWIDTH_USE_HT;
+ break;
+ }
+ vht_oper->center_freq_seg1_idx =
+ mwifiex_get_center_freq_index(priv, BAND_AAC,
+ bss_desc->channel,
+ chan_bw);
+
+ return 0;
+}
+
+static void mwifiex_tdls_add_ext_capab(struct mwifiex_private *priv,
+ struct sk_buff *skb)
+{
+ struct ieee_types_extcap *extcap;
+
+ extcap = (void *)skb_put(skb, sizeof(struct ieee_types_extcap));
+ extcap->ieee_hdr.element_id = WLAN_EID_EXT_CAPABILITY;
+ extcap->ieee_hdr.len = 8;
+ memset(extcap->ext_capab, 0, 8);
+ extcap->ext_capab[4] |= WLAN_EXT_CAPA5_TDLS_ENABLED;
+
+ if (priv->adapter->is_hw_11ac_capable)
+ extcap->ext_capab[7] |= WLAN_EXT_CAPA8_TDLS_WIDE_BW_ENABLED;
+}
+
+static void mwifiex_tdls_add_qos_capab(struct sk_buff *skb)
+{
+ u8 *pos = (void *)skb_put(skb, 3);
+
+ *pos++ = WLAN_EID_QOS_CAPA;
+ *pos++ = 1;
+ *pos++ = MWIFIEX_TDLS_DEF_QOS_CAPAB;
+}
+
+static int mwifiex_prep_tdls_encap_data(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_tdls_data *tf;
+ int ret;
+ u16 capab;
+ struct ieee80211_ht_cap *ht_cap;
+ u8 radio, *pos;
+
+ capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
+
+ tf = (void *)skb_put(skb, offsetof(struct ieee80211_tdls_data, u));
+ memcpy(tf->da, peer, ETH_ALEN);
+ memcpy(tf->sa, priv->curr_addr, ETH_ALEN);
+ tf->ether_type = cpu_to_be16(ETH_P_TDLS);
+ tf->payload_type = WLAN_TDLS_SNAP_RFTYPE;
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_REQUEST;
+ skb_put(skb, sizeof(tf->u.setup_req));
+ tf->u.setup_req.dialog_token = dialog_token;
+ tf->u.setup_req.capability = cpu_to_le16(capab);
+ ret = mwifiex_tdls_append_rates_ie(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ ht_cap = (void *)pos;
+ radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+ ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_capab(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ mwifiex_tdls_add_aid(priv, skb);
+ }
+
+ mwifiex_tdls_add_ext_capab(priv, skb);
+ mwifiex_tdls_add_qos_capab(skb);
+ break;
+
+ case WLAN_TDLS_SETUP_RESPONSE:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_RESPONSE;
+ skb_put(skb, sizeof(tf->u.setup_resp));
+ tf->u.setup_resp.status_code = cpu_to_le16(status_code);
+ tf->u.setup_resp.dialog_token = dialog_token;
+ tf->u.setup_resp.capability = cpu_to_le16(capab);
+ ret = mwifiex_tdls_append_rates_ie(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ ht_cap = (void *)pos;
+ radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+ ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_capab(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ mwifiex_tdls_add_aid(priv, skb);
+ }
+
+ mwifiex_tdls_add_ext_capab(priv, skb);
+ mwifiex_tdls_add_qos_capab(skb);
+ break;
+
+ case WLAN_TDLS_SETUP_CONFIRM:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_SETUP_CONFIRM;
+ skb_put(skb, sizeof(tf->u.setup_cfm));
+ tf->u.setup_cfm.status_code = cpu_to_le16(status_code);
+ tf->u.setup_cfm.dialog_token = dialog_token;
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_oper(priv, peer, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ }
+ break;
+
+ case WLAN_TDLS_TEARDOWN:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_TEARDOWN;
+ skb_put(skb, sizeof(tf->u.teardown));
+ tf->u.teardown.reason_code = cpu_to_le16(status_code);
+ break;
+
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ tf->category = WLAN_CATEGORY_TDLS;
+ tf->action_code = WLAN_TDLS_DISCOVERY_REQUEST;
+ skb_put(skb, sizeof(tf->u.discover_req));
+ tf->u.discover_req.dialog_token = dialog_token;
+ break;
+ default:
+ dev_err(priv->adapter->dev, "Unknown TDLS frame type.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void
+mwifiex_tdls_add_link_ie(struct sk_buff *skb, u8 *src_addr, u8 *peer, u8 *bssid)
+{
+ struct ieee80211_tdls_lnkie *lnkid;
+
+ lnkid = (void *)skb_put(skb, sizeof(struct ieee80211_tdls_lnkie));
+ lnkid->ie_type = WLAN_EID_LINK_ID;
+ lnkid->ie_len = sizeof(struct ieee80211_tdls_lnkie) -
+ sizeof(struct ieee_types_header);
+
+ memcpy(lnkid->bssid, bssid, ETH_ALEN);
+ memcpy(lnkid->init_sta, src_addr, ETH_ALEN);
+ memcpy(lnkid->resp_sta, peer, ETH_ALEN);
+}
+
+int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ struct sk_buff *skb;
+ struct mwifiex_txinfo *tx_info;
+ struct timeval tv;
+ int ret;
+ u16 skb_len;
+
+ skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
+ max(sizeof(struct ieee80211_mgmt),
+ sizeof(struct ieee80211_tdls_data)) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+ MWIFIEX_SUPPORTED_RATES +
+ 3 + /* Qos Info */
+ sizeof(struct ieee_types_extcap) +
+ sizeof(struct ieee80211_ht_cap) +
+ sizeof(struct ieee_types_bss_co_2040) +
+ sizeof(struct ieee80211_ht_operation) +
+ sizeof(struct ieee80211_tdls_lnkie) +
+ extra_ies_len;
+
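+ /* Reserve extra room for VHT capability, VHT operation and AID IEs on 11ac hardware. */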
+ if (priv->adapter->is_hw_11ac_capable)
+ skb_len += sizeof(struct ieee_types_vht_cap) +
+ sizeof(struct ieee_types_vht_oper) +
+ sizeof(struct ieee_types_aid);
+
+ skb = dev_alloc_skb(skb_len);
+ if (!skb) {
+ dev_err(priv->adapter->dev,
+ "allocate skb failed for management frame\n");
+ return -ENOMEM;
+ }
+ skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_CONFIRM:
+ case WLAN_TDLS_TEARDOWN:
+ case WLAN_TDLS_DISCOVERY_REQUEST:
+ ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
+ dialog_token, status_code,
+ skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies,
+ extra_ies_len);
+ mwifiex_tdls_add_link_ie(skb, priv->curr_addr, peer,
+ priv->cfg_bssid);
+ break;
+ case WLAN_TDLS_SETUP_RESPONSE:
+ ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
+ dialog_token, status_code,
+ skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies,
+ extra_ies_len);
+ mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
+ priv->cfg_bssid);
+ break;
+ }
+
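+ /* TDLS setup frames go out at background priority; other TDLS frames at video priority. */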
+ switch (action_code) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ case WLAN_TDLS_SETUP_RESPONSE:
+ skb->priority = MWIFIEX_PRIO_BK;
+ break;
+ default:
+ skb->priority = MWIFIEX_PRIO_VI;
+ break;
+ }
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+
+ do_gettimeofday(&tv);
+ skb->tstamp = timeval_to_ktime(tv);
+ mwifiex_queue_tx_pkt(priv, skb);
+
+ return 0;
+}
+
+static int
+mwifiex_construct_tdls_action_frame(struct mwifiex_private *priv, u8 *peer,
+ u8 action_code, u8 dialog_token,
+ u16 status_code, struct sk_buff *skb)
+{
+ struct ieee80211_mgmt *mgmt;
+ u8 bc_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ int ret;
+ u16 capab;
+ struct ieee80211_ht_cap *ht_cap;
+ u8 radio, *pos;
+
+ capab = priv->curr_bss_params.bss_descriptor.cap_info_bitmap;
+
+ mgmt = (void *)skb_put(skb, offsetof(struct ieee80211_mgmt, u));
+
+ memset(mgmt, 0, 24);
+ memcpy(mgmt->da, peer, ETH_ALEN);
+ memcpy(mgmt->sa, priv->curr_addr, ETH_ALEN);
+ memcpy(mgmt->bssid, priv->cfg_bssid, ETH_ALEN);
+ mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
+ IEEE80211_STYPE_ACTION);
+
+ /* add address 4 */
+ pos = skb_put(skb, ETH_ALEN);
+
+ switch (action_code) {
+ case WLAN_PUB_ACTION_TDLS_DISCOVER_RES:
+ skb_put(skb, sizeof(mgmt->u.action.u.tdls_discover_resp) + 1);
+ mgmt->u.action.category = WLAN_CATEGORY_PUBLIC;
+ mgmt->u.action.u.tdls_discover_resp.action_code =
+ WLAN_PUB_ACTION_TDLS_DISCOVER_RES;
+ mgmt->u.action.u.tdls_discover_resp.dialog_token =
+ dialog_token;
+ mgmt->u.action.u.tdls_discover_resp.capability =
+ cpu_to_le16(capab);
+ /* move back for addr4 */
+ memmove(pos + ETH_ALEN, &mgmt->u.action.category,
+ sizeof(mgmt->u.action.u.tdls_discover_resp));
+ /* init address 4 */
+ memcpy(pos, bc_addr, ETH_ALEN);
+
+ ret = mwifiex_tdls_append_rates_ie(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ pos = (void *)skb_put(skb, sizeof(struct ieee80211_ht_cap) + 2);
+ *pos++ = WLAN_EID_HT_CAPABILITY;
+ *pos++ = sizeof(struct ieee80211_ht_cap);
+ ht_cap = (void *)pos;
+ radio = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
+ ret = mwifiex_fill_cap_info(priv, radio, ht_cap);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+
+ if (priv->adapter->is_hw_11ac_capable) {
+ ret = mwifiex_tdls_add_vht_capab(priv, skb);
+ if (ret) {
+ dev_kfree_skb_any(skb);
+ return ret;
+ }
+ mwifiex_tdls_add_aid(priv, skb);
+ }
+
+ mwifiex_tdls_add_ext_capab(priv, skb);
+ mwifiex_tdls_add_qos_capab(skb);
+ break;
+ default:
+ dev_err(priv->adapter->dev, "Unknown TDLS action frame type\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *peer, u8 action_code, u8 dialog_token,
+ u16 status_code, const u8 *extra_ies,
+ size_t extra_ies_len)
+{
+ struct sk_buff *skb;
+ struct mwifiex_txinfo *tx_info;
+ struct timeval tv;
+ u8 *pos;
+ u32 pkt_type, tx_control;
+ u16 pkt_len, skb_len;
+
+ skb_len = MWIFIEX_MIN_DATA_HEADER_LEN +
+ max(sizeof(struct ieee80211_mgmt),
+ sizeof(struct ieee80211_tdls_data)) +
+ MWIFIEX_MGMT_FRAME_HEADER_SIZE +
+ MWIFIEX_SUPPORTED_RATES +
+ sizeof(struct ieee_types_extcap) +
+ sizeof(struct ieee80211_ht_cap) +
+ sizeof(struct ieee_types_bss_co_2040) +
+ sizeof(struct ieee80211_ht_operation) +
+ sizeof(struct ieee80211_tdls_lnkie) +
+ extra_ies_len +
+ 3 + /* Qos Info */
+ ETH_ALEN; /* Address4 */
+
+ if (priv->adapter->is_hw_11ac_capable)
+ skb_len += sizeof(struct ieee_types_vht_cap) +
+ sizeof(struct ieee_types_vht_oper) +
+ sizeof(struct ieee_types_aid);
+
+ skb = dev_alloc_skb(skb_len);
+ if (!skb) {
+ dev_err(priv->adapter->dev,
+ "allocate skb failed for management frame\n");
+ return -ENOMEM;
+ }
+
+ skb_reserve(skb, MWIFIEX_MIN_DATA_HEADER_LEN);
+
+ pkt_type = PKT_TYPE_MGMT;
+ tx_control = 0;
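+ /* Reserve the Tx header (packet type + tx control) and a packet length
+ * field that is filled in after the frame body is built.
+ */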
+ pos = skb_put(skb, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+ memset(pos, 0, MWIFIEX_MGMT_FRAME_HEADER_SIZE + sizeof(pkt_len));
+ memcpy(pos, &pkt_type, sizeof(pkt_type));
+ memcpy(pos + sizeof(pkt_type), &tx_control, sizeof(tx_control));
+
+ if (mwifiex_construct_tdls_action_frame(priv, peer, action_code,
+ dialog_token, status_code,
+ skb)) {
+ dev_kfree_skb_any(skb);
+ return -EINVAL;
+ }
+
+ if (extra_ies_len)
+ memcpy(skb_put(skb, extra_ies_len), extra_ies, extra_ies_len);
+
+ /* the TDLS link IE is always added last since we are the responder */
+
+ mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
+ priv->cfg_bssid);
+
+ skb->priority = MWIFIEX_PRIO_VI;
+
+ tx_info = MWIFIEX_SKB_TXCB(skb);
+ tx_info->bss_num = priv->bss_num;
+ tx_info->bss_type = priv->bss_type;
+ tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+
+ pkt_len = skb->len - MWIFIEX_MGMT_FRAME_HEADER_SIZE - sizeof(pkt_len);
+ memcpy(skb->data + MWIFIEX_MGMT_FRAME_HEADER_SIZE, &pkt_len,
+ sizeof(pkt_len));
+ do_gettimeofday(&tv);
+ skb->tstamp = timeval_to_ktime(tv);
+ mwifiex_queue_tx_pkt(priv, skb);
+
+ return 0;
+}
+
+/* This function processes TDLS action frames received from a peer.
+ * Peer capabilities are stored in the station node structure.
+ */
+void mwifiex_process_tdls_action_frame(struct mwifiex_private *priv,
+ u8 *buf, int len)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ u8 *peer, *pos, *end;
+ u8 i, action, basic;
+ int ie_len = 0;
+
+ if (len < (sizeof(struct ethhdr) + 3))
+ return;
+ if (*(u8 *)(buf + sizeof(struct ethhdr)) != WLAN_TDLS_SNAP_RFTYPE)
+ return;
+ if (*(u8 *)(buf + sizeof(struct ethhdr) + 1) != WLAN_CATEGORY_TDLS)
+ return;
+
+ peer = buf + ETH_ALEN;
+ action = *(u8 *)(buf + sizeof(struct ethhdr) + 2);
+
+ /* just handle TDLS setup request/response/confirm */
+ if (action > WLAN_TDLS_SETUP_CONFIRM)
+ return;
+
+ dev_dbg(priv->adapter->dev,
+ "rx:tdls action: peer=%pM, action=%d\n", peer, action);
+
+ sta_ptr = mwifiex_add_sta_entry(priv, peer);
+ if (!sta_ptr)
+ return;
+
+ switch (action) {
+ case WLAN_TDLS_SETUP_REQUEST:
+ if (len < (sizeof(struct ethhdr) + TDLS_REQ_FIX_LEN))
+ return;
+
+ pos = buf + sizeof(struct ethhdr) + 4;
+ /* payload 1 + category 1 + action 1 + dialog 1 */
+ sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
+ ie_len = len - sizeof(struct ethhdr) - TDLS_REQ_FIX_LEN;
+ pos += 2;
+ break;
+
+ case WLAN_TDLS_SETUP_RESPONSE:
+ if (len < (sizeof(struct ethhdr) + TDLS_RESP_FIX_LEN))
+ return;
+ /* payload 1 + category 1 + action 1 + dialog 1 + status code 2 */
+ pos = buf + sizeof(struct ethhdr) + 6;
+ sta_ptr->tdls_cap.capab = cpu_to_le16(*(u16 *)pos);
+ ie_len = len - sizeof(struct ethhdr) - TDLS_RESP_FIX_LEN;
+ pos += 2;
+ break;
+
+ case WLAN_TDLS_SETUP_CONFIRM:
+ if (len < (sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN))
+ return;
+ pos = buf + sizeof(struct ethhdr) + TDLS_CONFIRM_FIX_LEN;
+ ie_len = len - sizeof(struct ethhdr) - TDLS_CONFIRM_FIX_LEN;
+ break;
+ default:
+ dev_warn(priv->adapter->dev, "Unknown TDLS frame type.\n");
+ return;
+ }
+
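+ /* Parse the IEs: each element is ID (1 byte), length (1 byte), then payload. */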
+ for (end = pos + ie_len; pos + 1 < end; pos += 2 + pos[1]) {
+ if (pos + 2 + pos[1] > end)
+ break;
+
+ switch (*pos) {
+ case WLAN_EID_SUPP_RATES:
+ sta_ptr->tdls_cap.rates_len = pos[1];
+ for (i = 0; i < pos[1]; i++)
+ sta_ptr->tdls_cap.rates[i] = pos[i + 2];
+ break;
+
+ case WLAN_EID_EXT_SUPP_RATES:
+ basic = sta_ptr->tdls_cap.rates_len;
+ for (i = 0; i < pos[1]; i++)
+ sta_ptr->tdls_cap.rates[basic + i] = pos[i + 2];
+ sta_ptr->tdls_cap.rates_len += pos[1];
+ break;
+ case WLAN_EID_HT_CAPABILITY:
+ memcpy((u8 *)&sta_ptr->tdls_cap.ht_capb, pos,
+ sizeof(struct ieee80211_ht_cap));
+ sta_ptr->is_11n_enabled = 1;
+ break;
+ case WLAN_EID_HT_OPERATION:
+ memcpy(&sta_ptr->tdls_cap.ht_oper, pos,
+ sizeof(struct ieee80211_ht_operation));
+ break;
+ case WLAN_EID_BSS_COEX_2040:
+ sta_ptr->tdls_cap.coex_2040 = pos[2];
+ break;
+ case WLAN_EID_EXT_CAPABILITY:
+ memcpy((u8 *)&sta_ptr->tdls_cap.extcap, pos,
+ sizeof(struct ieee_types_header) +
+ min_t(u8, pos[1], 8));
+ break;
+ case WLAN_EID_RSN:
+ memcpy((u8 *)&sta_ptr->tdls_cap.rsn_ie, pos,
+ sizeof(struct ieee_types_header) + pos[1]);
+ break;
+ case WLAN_EID_QOS_CAPA:
+ sta_ptr->tdls_cap.qos_info = pos[2];
+ break;
+ case WLAN_EID_VHT_OPERATION:
+ if (priv->adapter->is_hw_11ac_capable)
+ memcpy(&sta_ptr->tdls_cap.vhtoper, pos,
+ sizeof(struct ieee80211_vht_operation));
+ break;
+ case WLAN_EID_VHT_CAPABILITY:
+ if (priv->adapter->is_hw_11ac_capable) {
+ memcpy((u8 *)&sta_ptr->tdls_cap.vhtcap, pos,
+ sizeof(struct ieee80211_vht_cap));
+ sta_ptr->is_11ac_enabled = 1;
+ }
+ break;
+ case WLAN_EID_AID:
+ if (priv->adapter->is_hw_11ac_capable)
+ sta_ptr->tdls_cap.aid =
+ le16_to_cpu(*(__le16 *)(pos + 2));
+ default:
+ break;
+ }
+ }
+
+ return;
+}
+
+static int
+mwifiex_tdls_process_config_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (!sta_ptr || sta_ptr->tdls_status == TDLS_SETUP_FAILURE) {
+ dev_err(priv->adapter->dev,
+ "link absent for peer %pM; cannot config\n", peer);
+ return -EINVAL;
+ }
+
+ memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_CONFIG_LINK;
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper);
+}
+
+static int
+mwifiex_tdls_process_create_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (sta_ptr && sta_ptr->tdls_status == TDLS_SETUP_INPROGRESS) {
+ dev_dbg(priv->adapter->dev,
+ "Setup already in progress for peer %pM\n", peer);
+ return 0;
+ }
+
+ sta_ptr = mwifiex_add_sta_entry(priv, peer);
+ if (!sta_ptr)
+ return -ENOMEM;
+
+ sta_ptr->tdls_status = TDLS_SETUP_INPROGRESS;
+ mwifiex_hold_tdls_packets(priv, peer);
+ memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_CREATE_LINK;
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper);
+}
+
+static int
+mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+ unsigned long flags;
+
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (sta_ptr) {
+ if (sta_ptr->is_11n_enabled) {
+ mwifiex_11n_cleanup_reorder_tbl(priv);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ }
+ mwifiex_del_sta_entry(priv, peer);
+ }
+
+ mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+ memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
+ return mwifiex_send_cmd_sync(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper);
+}
+
+static int
+mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, u8 *peer)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct ieee80211_mcs_info mcs;
+ unsigned long flags;
+ int i;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, peer);
+
+ if (sta_ptr && (sta_ptr->tdls_status != TDLS_SETUP_FAILURE)) {
+ dev_dbg(priv->adapter->dev,
+ "tdls: enable link %pM success\n", peer);
+
+ sta_ptr->tdls_status = TDLS_SETUP_COMPLETE;
+
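+ /* Derive 11n support and max A-MSDU size from the peer's HT capabilities. */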
+ mcs = sta_ptr->tdls_cap.ht_capb.mcs;
+ if (mcs.rx_mask[0] != 0xff)
+ sta_ptr->is_11n_enabled = true;
+ if (sta_ptr->is_11n_enabled) {
+ if (le16_to_cpu(sta_ptr->tdls_cap.ht_capb.cap_info) &
+ IEEE80211_HT_CAP_MAX_AMSDU)
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_8K;
+ else
+ sta_ptr->max_amsdu =
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+
+ for (i = 0; i < MAX_NUM_TID; i++)
+ sta_ptr->ampdu_sta[i] =
+ priv->aggr_prio_tbl[i].ampdu_user;
+ } else {
+ for (i = 0; i < MAX_NUM_TID; i++)
+ sta_ptr->ampdu_sta[i] = BA_STREAM_NOT_ALLOWED;
+ }
+
+ memset(sta_ptr->rx_seq, 0xff, sizeof(sta_ptr->rx_seq));
+ mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
+ } else {
+ dev_dbg(priv->adapter->dev,
+ "tdls: enable link %pM failed\n", peer);
+ if (sta_ptr) {
+ mwifiex_11n_cleanup_reorder_tbl(priv);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_del_sta_entry(priv, peer);
+ }
+ mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
+
+ return -1;
+ }
+
+ return 0;
+}
+
+int mwifiex_tdls_oper(struct mwifiex_private *priv, u8 *peer, u8 action)
+{
+ switch (action) {
+ case MWIFIEX_TDLS_ENABLE_LINK:
+ return mwifiex_tdls_process_enable_link(priv, peer);
+ case MWIFIEX_TDLS_DISABLE_LINK:
+ return mwifiex_tdls_process_disable_link(priv, peer);
+ case MWIFIEX_TDLS_CREATE_LINK:
+ return mwifiex_tdls_process_create_link(priv, peer);
+ case MWIFIEX_TDLS_CONFIG_LINK:
+ return mwifiex_tdls_process_config_link(priv, peer);
+ }
+ return 0;
+}
+
+int mwifiex_get_tdls_link_status(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *sta_ptr;
+
+ sta_ptr = mwifiex_get_sta_entry(priv, mac);
+ if (sta_ptr)
+ return sta_ptr->tdls_status;
+
+ return TDLS_NOT_SETUP;
+}
+
+void mwifiex_disable_all_tdls_links(struct mwifiex_private *priv)
+{
+ struct mwifiex_sta_node *sta_ptr;
+ struct mwifiex_ds_tdls_oper tdls_oper;
+ unsigned long flags;
+
+ if (list_empty(&priv->sta_list))
+ return;
+
+ list_for_each_entry(sta_ptr, &priv->sta_list, list) {
+ memset(&tdls_oper, 0, sizeof(struct mwifiex_ds_tdls_oper));
+
+ if (sta_ptr->is_11n_enabled) {
+ mwifiex_11n_cleanup_reorder_tbl(priv);
+ spin_lock_irqsave(&priv->wmm.ra_list_spinlock,
+ flags);
+ mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ }
+
+ mwifiex_restore_tdls_packets(priv, sta_ptr->mac_addr,
+ TDLS_LINK_TEARDOWN);
+ memcpy(&tdls_oper.peer_mac, sta_ptr->mac_addr, ETH_ALEN);
+ tdls_oper.tdls_action = MWIFIEX_TDLS_DISABLE_LINK;
+ if (mwifiex_send_cmd_async(priv, HostCmd_CMD_TDLS_OPER,
+ HostCmd_ACT_GEN_SET, 0, &tdls_oper))
+ dev_warn(priv->adapter->dev,
+ "Disable link failed for TDLS peer %pM",
+ sta_ptr->mac_addr);
+ }
+
+ mwifiex_del_all_sta_list(priv);
+}
diff --git a/drivers/net/wireless/mwifiex/uap_event.c b/drivers/net/wireless/mwifiex/uap_event.c
index 7180665..2d47ba7 100644
--- a/drivers/net/wireless/mwifiex/uap_event.c
+++ b/drivers/net/wireless/mwifiex/uap_event.c
@@ -21,126 +21,8 @@
#include "main.h"
#include "11n.h"
-/*
- * This function will return the pointer to station entry in station list
- * table which matches specified mac address.
- * This function should be called after acquiring RA list spinlock.
- * NULL is returned if station entry is not found in associated STA list.
- */
-struct mwifiex_sta_node *
-mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
- struct mwifiex_sta_node *node;
-
- if (!mac)
- return NULL;
-
- list_for_each_entry(node, &priv->sta_list, list) {
- if (!memcmp(node->mac_addr, mac, ETH_ALEN))
- return node;
- }
- return NULL;
-}
-
-/*
- * This function will add a sta_node entry to associated station list
- * table with the given mac address.
- * If entry exist already, existing entry is returned.
- * If received mac address is NULL, NULL is returned.
- */
-static struct mwifiex_sta_node *
-mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
- struct mwifiex_sta_node *node;
- unsigned long flags;
- if (!mac)
- return NULL;
-
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
- node = mwifiex_get_sta_entry(priv, mac);
- if (node)
- goto done;
-
- node = kzalloc(sizeof(struct mwifiex_sta_node), GFP_ATOMIC);
- if (!node)
- goto done;
-
- memcpy(node->mac_addr, mac, ETH_ALEN);
- list_add_tail(&node->list, &priv->sta_list);
-
-done:
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- return node;
-}
-
-/*
- * This function will search for HT IE in association request IEs
- * and set station HT parameters accordingly.
- */
-static void
-mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
- int ies_len, struct mwifiex_sta_node *node)
-{
- const struct ieee80211_ht_cap *ht_cap;
-
- if (!ies)
- return;
-
- ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
- if (ht_cap) {
- node->is_11n_enabled = 1;
- node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
- IEEE80211_HT_CAP_MAX_AMSDU ?
- MWIFIEX_TX_DATA_BUF_SIZE_8K :
- MWIFIEX_TX_DATA_BUF_SIZE_4K;
- } else {
- node->is_11n_enabled = 0;
- }
-
- return;
-}
-
-/*
- * This function will delete a station entry from station list
- */
-static void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
-{
- struct mwifiex_sta_node *node;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
-
- node = mwifiex_get_sta_entry(priv, mac);
- if (node) {
- list_del(&node->list);
- kfree(node);
- }
-
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- return;
-}
-
-/*
- * This function will delete all stations from associated station list.
- */
-static void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
-{
- struct mwifiex_sta_node *node, *tmp;
- unsigned long flags;
-
- spin_lock_irqsave(&priv->sta_list_spinlock, flags);
-
- list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
- list_del(&node->list);
- kfree(node);
- }
-
- INIT_LIST_HEAD(&priv->sta_list);
- spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
- return;
-}
/*
* This function handles AP interface specific events generated by firmware.
diff --git a/drivers/net/wireless/mwifiex/util.c b/drivers/net/wireless/mwifiex/util.c
index 9b82e22..8d37bfc 100644
--- a/drivers/net/wireless/mwifiex/util.c
+++ b/drivers/net/wireless/mwifiex/util.c
@@ -252,3 +252,117 @@ int mwifiex_complete_cmd(struct mwifiex_adapter *adapter,
return 0;
}
+
+/* This function will return a pointer to the station entry in the station
+ * list table which matches the specified MAC address.
+ * This function should be called after acquiring the RA list spinlock.
+ * NULL is returned if the station entry is not found in the associated STA list.
+ */
+struct mwifiex_sta_node *
+mwifiex_get_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+
+ if (!mac)
+ return NULL;
+
+ list_for_each_entry(node, &priv->sta_list, list) {
+ if (!memcmp(node->mac_addr, mac, ETH_ALEN))
+ return node;
+ }
+
+ return NULL;
+}
+
+/* This function will add a sta_node entry to the associated station list
+ * table with the given MAC address.
+ * If an entry already exists, the existing entry is returned.
+ * If the received MAC address is NULL, NULL is returned.
+ */
+struct mwifiex_sta_node *
+mwifiex_add_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+ unsigned long flags;
+
+ if (!mac)
+ return NULL;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+ node = mwifiex_get_sta_entry(priv, mac);
+ if (node)
+ goto done;
+
+ node = kzalloc(sizeof(*node), GFP_ATOMIC);
+ if (!node)
+ goto done;
+
+ memcpy(node->mac_addr, mac, ETH_ALEN);
+ list_add_tail(&node->list, &priv->sta_list);
+
+done:
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return node;
+}
+
+/* This function will search for the HT IE in the association request IEs
+ * and set the station's HT parameters accordingly.
+ */
+void
+mwifiex_set_sta_ht_cap(struct mwifiex_private *priv, const u8 *ies,
+ int ies_len, struct mwifiex_sta_node *node)
+{
+ const struct ieee80211_ht_cap *ht_cap;
+
+ if (!ies)
+ return;
+
+ ht_cap = (void *)cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies, ies_len);
+ if (ht_cap) {
+ node->is_11n_enabled = 1;
+ node->max_amsdu = le16_to_cpu(ht_cap->cap_info) &
+ IEEE80211_HT_CAP_MAX_AMSDU ?
+ MWIFIEX_TX_DATA_BUF_SIZE_8K :
+ MWIFIEX_TX_DATA_BUF_SIZE_4K;
+ } else {
+ node->is_11n_enabled = 0;
+ }
+
+ return;
+}
+
+/* This function will delete a station entry from the station list */
+void mwifiex_del_sta_entry(struct mwifiex_private *priv, u8 *mac)
+{
+ struct mwifiex_sta_node *node;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+ node = mwifiex_get_sta_entry(priv, mac);
+ if (node) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return;
+}
+
+/* This function will delete all stations from the associated station list. */
+void mwifiex_del_all_sta_list(struct mwifiex_private *priv)
+{
+ struct mwifiex_sta_node *node, *tmp;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->sta_list_spinlock, flags);
+
+ list_for_each_entry_safe(node, tmp, &priv->sta_list, list) {
+ list_del(&node->list);
+ kfree(node);
+ }
+
+ INIT_LIST_HEAD(&priv->sta_list);
+ spin_unlock_irqrestore(&priv->sta_list_spinlock, flags);
+ return;
+}
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
index cb2d058..ddae570 100644
--- a/drivers/net/wireless/mwifiex/util.h
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -30,8 +30,24 @@ static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t));
}
-static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb, dma_addr_t *buf_pa)
+struct mwifiex_dma_mapping {
+ dma_addr_t addr;
+ size_t len;
+};
+
+static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb,
+ struct mwifiex_dma_mapping *mapping)
{
- memcpy(buf_pa, skb->cb, sizeof(dma_addr_t));
+ memcpy(mapping, skb->cb, sizeof(*mapping));
}
+
+static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
+{
+ struct mwifiex_dma_mapping mapping;
+
+ MWIFIEX_SKB_PACB(skb, &mapping);
+
+ return mapping.addr;
+}
+
#endif /* !_MWIFIEX_UTIL_H_ */
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 981cf6e..1c5f2b6 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -64,21 +64,6 @@ static u8 tos_to_tid[] = {
0x07 /* 1 1 1 AC_VO */
};
-/*
- * This table inverses the tos_to_tid operation to get a priority
- * which is in sequential order, and can be compared.
- * Use this to compare the priority of two different TIDs.
- */
-static u8 tos_to_tid_inv[] = {
- 0x02, /* from tos_to_tid[2] = 0 */
- 0x00, /* from tos_to_tid[0] = 1 */
- 0x01, /* from tos_to_tid[1] = 2 */
- 0x03,
- 0x04,
- 0x05,
- 0x06,
- 0x07};
-
static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
/*
@@ -175,8 +160,15 @@ mwifiex_ralist_add(struct mwifiex_private *priv, u8 *ra)
break;
ra_list->is_11n_enabled = 0;
+ ra_list->tdls_link = false;
if (!mwifiex_queuing_ra_based(priv)) {
- ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
+ if (mwifiex_get_tdls_link_status(priv, ra) ==
+ TDLS_SETUP_COMPLETE) {
+ ra_list->is_11n_enabled =
+ mwifiex_tdls_peer_11n_enabled(priv, ra);
+ } else {
+ ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
+ }
} else {
ra_list->is_11n_enabled =
mwifiex_is_sta_11n_enabled(priv, node);
@@ -213,8 +205,9 @@ static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
* This function map ACs to TIDs.
*/
static void
-mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
+mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
{
+ struct mwifiex_wmm_desc *wmm = &priv->wmm;
u8 *queue_priority = wmm->queue_priority;
int i;
@@ -224,7 +217,7 @@ mwifiex_wmm_queue_priorities_tid(struct mwifiex_wmm_desc *wmm)
}
for (i = 0; i < MAX_NUM_TID; ++i)
- tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
+ priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
}
@@ -285,7 +278,7 @@ mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
}
}
- mwifiex_wmm_queue_priorities_tid(&priv->wmm);
+ mwifiex_wmm_queue_priorities_tid(priv);
}
/*
@@ -388,8 +381,7 @@ mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
* AP is disabled (due to call admission control (ACM bit). Mapping
* of TID to AC is taken care of internally.
*/
-static u8
-mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
+u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
{
enum mwifiex_wmm_ac_e ac, ac_down;
u8 new_tid;
@@ -421,9 +413,11 @@ mwifiex_wmm_init(struct mwifiex_adapter *adapter)
continue;
for (i = 0; i < MAX_NUM_TID; ++i) {
- priv->aggr_prio_tbl[i].amsdu = tos_to_tid_inv[i];
- priv->aggr_prio_tbl[i].ampdu_ap = tos_to_tid_inv[i];
- priv->aggr_prio_tbl[i].ampdu_user = tos_to_tid_inv[i];
+ priv->aggr_prio_tbl[i].amsdu = priv->tos_to_tid_inv[i];
+ priv->aggr_prio_tbl[i].ampdu_ap =
+ priv->tos_to_tid_inv[i];
+ priv->aggr_prio_tbl[i].ampdu_user =
+ priv->tos_to_tid_inv[i];
}
priv->aggr_prio_tbl[6].amsdu
@@ -546,6 +540,7 @@ void
mwifiex_clean_txrx(struct mwifiex_private *priv)
{
unsigned long flags;
+ struct sk_buff *skb, *tmp;
mwifiex_11n_cleanup_reorder_tbl(priv);
spin_lock_irqsave(&priv->wmm.ra_list_spinlock, flags);
@@ -563,6 +558,9 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
!priv->adapter->surprise_removed)
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
+
+ skb_queue_walk_safe(&priv->tdls_txq, skb, tmp)
+ mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
}
/*
@@ -591,7 +589,7 @@ mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
* If no such node is found, a new node is added first and then
* retrieved.
*/
-static struct mwifiex_ra_list_tbl *
+struct mwifiex_ra_list_tbl *
mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr)
{
struct mwifiex_ra_list_tbl *ra_list;
@@ -641,6 +639,21 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
struct mwifiex_ra_list_tbl *ra_list;
u8 ra[ETH_ALEN], tid_down;
unsigned long flags;
+ struct list_head *list_head;
+ int tdls_status = TDLS_NOT_SETUP;
+ struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
+ struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
+
+ memcpy(ra, eth_hdr->h_dest, ETH_ALEN);
+
+ if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
+ ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
+ if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
+ dev_dbg(adapter->dev,
+ "TDLS setup packet for %pM. Don't block\n", ra);
+ else
+ tdls_status = mwifiex_get_tdls_link_status(priv, ra);
+ }
if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
dev_dbg(adapter->dev, "data: drop packet in disconnect\n");
@@ -659,12 +672,27 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
have only 1 raptr for a tid in case of infra */
if (!mwifiex_queuing_ra_based(priv) &&
!mwifiex_is_skb_mgmt_frame(skb)) {
- if (!list_empty(&priv->wmm.tid_tbl_ptr[tid_down].ra_list))
- ra_list = list_first_entry(
- &priv->wmm.tid_tbl_ptr[tid_down].ra_list,
- struct mwifiex_ra_list_tbl, list);
- else
- ra_list = NULL;
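+ /* Route the packet according to the TDLS link state for this RA. */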
+ switch (tdls_status) {
+ case TDLS_SETUP_COMPLETE:
+ ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
+ ra);
+ tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
+ break;
+ case TDLS_SETUP_INPROGRESS:
+ skb_queue_tail(&priv->tdls_txq, skb);
+ spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock,
+ flags);
+ return;
+ default:
+ list_head = &priv->wmm.tid_tbl_ptr[tid_down].ra_list;
+ if (!list_empty(list_head))
+ ra_list = list_first_entry(
+ list_head, struct mwifiex_ra_list_tbl,
+ list);
+ else
+ ra_list = NULL;
+ break;
+ }
} else {
memcpy(ra, skb->data, ETH_ALEN);
if (ra[0] & 0x01 || mwifiex_is_skb_mgmt_frame(skb))
@@ -684,9 +712,9 @@ mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
ra_list->total_pkt_count++;
if (atomic_read(&priv->wmm.highest_queued_prio) <
- tos_to_tid_inv[tid_down])
+ priv->tos_to_tid_inv[tid_down])
atomic_set(&priv->wmm.highest_queued_prio,
- tos_to_tid_inv[tid_down]);
+ priv->tos_to_tid_inv[tid_down]);
atomic_inc(&priv->wmm.tx_pkts_queued);
@@ -1227,7 +1255,7 @@ mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
/* ra_list_spinlock has been freed in
mwifiex_send_single_packet() */
} else {
- if (mwifiex_is_ampdu_allowed(priv, tid) &&
+ if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
ptr->ba_pkt_count > ptr->ba_packet_thr) {
if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
mwifiex_create_ba_tbl(priv, ptr->ra, tid,
diff --git a/drivers/net/wireless/mwifiex/wmm.h b/drivers/net/wireless/mwifiex/wmm.h
index 0f129d4..83e4208 100644
--- a/drivers/net/wireless/mwifiex/wmm.h
+++ b/drivers/net/wireless/mwifiex/wmm.h
@@ -34,6 +34,21 @@ enum ieee_types_wmm_ecw_bitmasks {
static const u16 mwifiex_1d_to_wmm_queue[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
/*
+ * This table inverts the tos_to_tid operation to get a priority
+ * which is in sequential order and can be compared.
+ * Use this to compare the priority of two different TIDs.
+ */
+static const u8 tos_to_tid_inv[] = {
+ 0x02, /* from tos_to_tid[2] = 0 */
+ 0x00, /* from tos_to_tid[0] = 1 */
+ 0x01, /* from tos_to_tid[1] = 2 */
+ 0x03,
+ 0x04,
+ 0x05,
+ 0x06,
+ 0x07};
+
+/*
* This function retrieves the TID of the given RA list.
*/
static inline int
@@ -107,5 +122,8 @@ void mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
void mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv);
int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
const struct host_cmd_ds_command *resp);
+struct mwifiex_ra_list_tbl *
+mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid, u8 *ra_addr);
+u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid);
#endif /* !_MWIFIEX_WMM_H_ */
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 5028557..2e89a86 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2835,7 +2835,9 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
bssid, req_ie, req_ie_len,
resp_ie, resp_ie_len, GFP_KERNEL);
} else if (priv->infra_mode == NDIS_80211_INFRA_ADHOC)
- cfg80211_ibss_joined(usbdev->net, bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(usbdev->net, bssid,
+ get_current_channel(usbdev, NULL),
+ GFP_KERNEL);
kfree(info);
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index caddc1b..14a90dd 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -125,9 +125,9 @@ static inline bool rt2800usb_entry_txstatus_timeout(struct queue_entry *entry)
tout = time_after(jiffies, entry->last_action + msecs_to_jiffies(100));
if (unlikely(tout))
- rt2x00_warn(entry->queue->rt2x00dev,
- "TX status timeout for entry %d in queue %d\n",
- entry->entry_idx, entry->queue->qid);
+ rt2x00_dbg(entry->queue->rt2x00dev,
+ "TX status timeout for entry %d in queue %d\n",
+ entry->entry_idx, entry->queue->qid);
return tout;
}
@@ -566,8 +566,8 @@ static void rt2800usb_txdone(struct rt2x00_dev *rt2x00dev)
queue = rt2x00queue_get_tx_queue(rt2x00dev, qid);
if (unlikely(rt2x00queue_empty(queue))) {
- rt2x00_warn(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
- qid);
+ rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n",
+ qid);
break;
}
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 3867d14..7980ab1 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -619,13 +619,13 @@ static int rtl8180_start(struct ieee80211_hw *dev)
if (priv->r8185) {
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
- reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT;
- reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+ reg &= ~RTL818X_CW_CONF_PERPACKET_CW;
+ reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
reg |= RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
diff --git a/drivers/net/wireless/rtl818x/rtl8187/dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index fd78df8..c981bcf 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -785,7 +785,7 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
rtl818x_iowrite16(priv, (__le16 *)0xFF34, 0x0FFF);
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
- reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+ reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
/* Auto Rate Fallback Register (ARFR): 1M-54M setting */
@@ -943,8 +943,8 @@ static int rtl8187_start(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
@@ -986,13 +986,13 @@ static int rtl8187_start(struct ieee80211_hw *dev)
rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
- reg &= ~RTL818X_CW_CONF_PERPACKET_CW_SHIFT;
- reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
+ reg &= ~RTL818X_CW_CONF_PERPACKET_CW;
+ reg |= RTL818X_CW_CONF_PERPACKET_RETRY;
rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
- reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN;
+ reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL;
reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
diff --git a/drivers/net/wireless/rtl818x/rtl818x.h b/drivers/net/wireless/rtl818x/rtl818x.h
index ce23dfd..fa7f7f6 100644
--- a/drivers/net/wireless/rtl818x/rtl818x.h
+++ b/drivers/net/wireless/rtl818x/rtl818x.h
@@ -144,9 +144,9 @@ struct rtl818x_csr {
__le32 HSSI_PARA;
u8 reserved_13[4];
u8 TX_AGC_CTL;
-#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT (1 << 0)
-#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT (1 << 1)
-#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT (1 << 2)
+#define RTL818X_TX_AGC_CTL_PERPACKET_GAIN (1 << 0)
+#define RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL (1 << 1)
+#define RTL818X_TX_AGC_CTL_FEEDBACK_ANT (1 << 2)
u8 TX_GAIN_CCK;
u8 TX_GAIN_OFDM;
u8 TX_ANTENNA;
@@ -158,8 +158,8 @@ struct rtl818x_csr {
u8 SLOT;
u8 reserved_16[5];
u8 CW_CONF;
-#define RTL818X_CW_CONF_PERPACKET_CW_SHIFT (1 << 0)
-#define RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT (1 << 1)
+#define RTL818X_CW_CONF_PERPACKET_CW (1 << 0)
+#define RTL818X_CW_CONF_PERPACKET_RETRY (1 << 1)
u8 CW_VAL;
u8 RATE_FALLBACK;
#define RTL818X_RATE_FALLBACK_ENABLE (1 << 7)
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
index a98acef..ee28a1a 100644
--- a/drivers/net/wireless/rtlwifi/rc.c
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -260,8 +260,7 @@ static void rtl_rate_free_sta(void *rtlpriv,
kfree(rate_priv);
}
-static struct rate_control_ops rtl_rate_ops = {
- .module = NULL,
+static const struct rate_control_ops rtl_rate_ops = {
.name = "rtl_rc",
.alloc = rtl_rate_alloc,
.free = rtl_rate_free,
diff --git a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
index aece6c9..27ace30 100644
--- a/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8188ee/trx.c
@@ -452,7 +452,7 @@ bool rtl88ee_rx_query_desc(struct ieee80211_hw *hw,
/* During testing, hdr was NULL */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 52abf0a..114858d 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -393,7 +393,7 @@ bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
/* In testing, hdr was NULL here */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index 27efbcd..163a681 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -310,7 +310,7 @@ bool rtl92se_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats,
/* during testing, hdr was NULL here */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
diff --git a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
index 50b7be3..721162c 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723ae/trx.c
@@ -334,7 +334,7 @@ bool rtl8723ae_rx_query_desc(struct ieee80211_hw *hw,
/* during testing, hdr could be NULL here */
return false;
}
- if ((ieee80211_is_robust_mgmt_frame(hdr)) &&
+ if ((_ieee80211_is_robust_mgmt_frame(hdr)) &&
(ieee80211_has_protected(hdr->frame_control)))
rx_status->flag &= ~RX_FLAG_DECRYPTED;
else
diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
index be7129b..d50dfac 100644
--- a/drivers/net/wireless/ti/wl12xx/main.c
+++ b/drivers/net/wireless/ti/wl12xx/main.c
@@ -1378,7 +1378,7 @@ static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
static int wl12xx_tx_delayed_compl(struct wl1271 *wl)
{
- if (wl->fw_status_1->tx_results_counter ==
+ if (wl->fw_status->tx_results_counter ==
(wl->tx_results_count & 0xff))
return 0;
@@ -1438,6 +1438,37 @@ out:
return ret;
}
+static void wl12xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ struct wl12xx_fw_status *int_fw_status = raw_fw_status;
+
+ fw_status->intr = le32_to_cpu(int_fw_status->intr);
+ fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
+ fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
+ fw_status->tx_results_counter = int_fw_status->tx_results_counter;
+ fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
+
+ fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
+ fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
+ fw_status->link_fast_bitmap =
+ le32_to_cpu(int_fw_status->link_fast_bitmap);
+ fw_status->total_released_blks =
+ le32_to_cpu(int_fw_status->total_released_blks);
+ fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
+
+ fw_status->counters.tx_released_pkts =
+ int_fw_status->counters.tx_released_pkts;
+ fw_status->counters.tx_lnk_free_pkts =
+ int_fw_status->counters.tx_lnk_free_pkts;
+ fw_status->counters.tx_voice_released_blks =
+ int_fw_status->counters.tx_voice_released_blks;
+ fw_status->counters.tx_last_rate =
+ int_fw_status->counters.tx_last_rate;
+
+ fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
+}
+
static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
@@ -1677,6 +1708,7 @@ static struct wlcore_ops wl12xx_ops = {
.tx_delayed_compl = wl12xx_tx_delayed_compl,
.hw_init = wl12xx_hw_init,
.init_vif = NULL,
+ .convert_fw_status = wl12xx_convert_fw_status,
.sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask,
.get_pg_ver = wl12xx_get_pg_ver,
.get_mac = wl12xx_get_mac,
@@ -1711,22 +1743,53 @@ static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
},
};
+static const struct ieee80211_iface_limit wl12xx_iface_limits[] = {
+ {
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_combination
+wl12xx_iface_combinations[] = {
+ {
+ .max_interfaces = 3,
+ .limits = wl12xx_iface_limits,
+ .n_limits = ARRAY_SIZE(wl12xx_iface_limits),
+ .num_different_channels = 1,
+ },
+};
+
static int wl12xx_setup(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
struct wl12xx_platform_data *pdata = pdev_data->pdata;
+ BUILD_BUG_ON(WL12XX_MAX_LINKS > WLCORE_MAX_LINKS);
+ BUILD_BUG_ON(WL12XX_MAX_AP_STATIONS > WL12XX_MAX_LINKS);
+
wl->rtable = wl12xx_rtable;
wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
- wl->num_channels = 1;
+ wl->num_links = WL12XX_MAX_LINKS;
+ wl->max_ap_stations = WL12XX_MAX_AP_STATIONS;
+ wl->iface_combinations = wl12xx_iface_combinations;
+ wl->n_iface_combinations = ARRAY_SIZE(wl12xx_iface_combinations);
wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
+ wl->fw_status_len = sizeof(struct wl12xx_fw_status);
wl->fw_status_priv_len = 0;
wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
+ wl->ofdm_only_ap = true;
wlcore_set_ht_cap(wl, IEEE80211_BAND_2GHZ, &wl12xx_ht_cap);
wlcore_set_ht_cap(wl, IEEE80211_BAND_5GHZ, &wl12xx_ht_cap);
wl12xx_conf_init(wl);
diff --git a/drivers/net/wireless/ti/wl12xx/wl12xx.h b/drivers/net/wireless/ti/wl12xx/wl12xx.h
index 9e5484a..75c9265 100644
--- a/drivers/net/wireless/ti/wl12xx/wl12xx.h
+++ b/drivers/net/wireless/ti/wl12xx/wl12xx.h
@@ -65,6 +65,9 @@
#define WL12XX_RX_BA_MAX_SESSIONS 3
+#define WL12XX_MAX_AP_STATIONS 8
+#define WL12XX_MAX_LINKS 12
+
struct wl127x_rx_mem_pool_addr {
u32 addr;
u32 addr_extra;
@@ -79,4 +82,54 @@ struct wl12xx_priv {
struct wl127x_rx_mem_pool_addr *rx_mem_addr;
};
+struct wl12xx_fw_packet_counters {
+ /* Cumulative counter of released packets per AC */
+ u8 tx_released_pkts[NUM_TX_QUEUES];
+
+ /* Cumulative counter of freed packets per HLID */
+ u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
+
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+
+ u8 padding[2];
+} __packed;
+
+/* FW status registers */
+struct wl12xx_fw_status {
+ __le32 intr;
+ u8 fw_rx_counter;
+ u8 drv_rx_counter;
+ u8 reserved;
+ u8 tx_results_counter;
+ __le32 rx_pkt_descs[WL12XX_NUM_RX_DESCRIPTORS];
+
+ __le32 fw_localtime;
+
+ /*
+ * A bitmap (where each bit represents a single HLID)
+ * to indicate if the station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /*
+ * A bitmap (where each bit represents a single HLID) to indicate
+ * if the station is in Fast mode
+ */
+ __le32 link_fast_bitmap;
+
+ /* Cumulative counter of total released mem blocks since FW-reset */
+ __le32 total_released_blks;
+
+ /* Size (in Memory Blocks) of TX pool */
+ __le32 tx_total;
+
+ struct wl12xx_fw_packet_counters counters;
+
+ __le32 log_start_addr;
+} __packed;
+
#endif /* __WL12XX_PRIV_H__ */
diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
index ec37b16..de5b4fa 100644
--- a/drivers/net/wireless/ti/wl18xx/main.c
+++ b/drivers/net/wireless/ti/wl18xx/main.c
@@ -648,7 +648,7 @@ static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
};
/* TODO: maybe move to a new header file? */
-#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-2.bin"
+#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-3.bin"
static int wl18xx_identify_chip(struct wl1271 *wl)
{
@@ -1133,6 +1133,39 @@ static int wl18xx_hw_init(struct wl1271 *wl)
return ret;
}
+static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ struct wl18xx_fw_status *int_fw_status = raw_fw_status;
+
+ fw_status->intr = le32_to_cpu(int_fw_status->intr);
+ fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
+ fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
+ fw_status->tx_results_counter = int_fw_status->tx_results_counter;
+ fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
+
+ fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
+ fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
+ fw_status->link_fast_bitmap =
+ le32_to_cpu(int_fw_status->link_fast_bitmap);
+ fw_status->total_released_blks =
+ le32_to_cpu(int_fw_status->total_released_blks);
+ fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
+
+ fw_status->counters.tx_released_pkts =
+ int_fw_status->counters.tx_released_pkts;
+ fw_status->counters.tx_lnk_free_pkts =
+ int_fw_status->counters.tx_lnk_free_pkts;
+ fw_status->counters.tx_voice_released_blks =
+ int_fw_status->counters.tx_voice_released_blks;
+ fw_status->counters.tx_last_rate =
+ int_fw_status->counters.tx_last_rate;
+
+ fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
+
+ fw_status->priv = &int_fw_status->priv;
+}
+
static void wl18xx_set_tx_desc_csum(struct wl1271 *wl,
struct wl1271_tx_hw_descr *desc,
struct sk_buff *skb)
@@ -1572,7 +1605,7 @@ static bool wl18xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
{
u8 thold;
struct wl18xx_fw_status_priv *status_priv =
- (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
/* suspended links are never high priority */
@@ -1594,7 +1627,7 @@ static bool wl18xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
{
u8 thold;
struct wl18xx_fw_status_priv *status_priv =
- (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
u32 suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
if (test_bit(hlid, (unsigned long *)&suspend_bitmap))
@@ -1632,6 +1665,7 @@ static struct wlcore_ops wl18xx_ops = {
.tx_immediate_compl = wl18xx_tx_immediate_completion,
.tx_delayed_compl = NULL,
.hw_init = wl18xx_hw_init,
+ .convert_fw_status = wl18xx_convert_fw_status,
.set_tx_desc_csum = wl18xx_set_tx_desc_csum,
.get_pg_ver = wl18xx_get_pg_ver,
.set_rx_csum = wl18xx_set_rx_csum,
@@ -1713,19 +1747,62 @@ static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
},
};
+static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
+ {
+ .max = 3,
+ .types = BIT(NL80211_IFTYPE_STATION),
+ },
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_AP) |
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_CLIENT),
+ },
+};
+
+static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
+ {
+ .max = 2,
+ .types = BIT(NL80211_IFTYPE_AP),
+ },
+};
+
+static const struct ieee80211_iface_combination
+wl18xx_iface_combinations[] = {
+ {
+ .max_interfaces = 3,
+ .limits = wl18xx_iface_limits,
+ .n_limits = ARRAY_SIZE(wl18xx_iface_limits),
+ .num_different_channels = 2,
+ },
+ {
+ .max_interfaces = 2,
+ .limits = wl18xx_iface_ap_limits,
+ .n_limits = ARRAY_SIZE(wl18xx_iface_ap_limits),
+ .num_different_channels = 1,
+ }
+};
+
static int wl18xx_setup(struct wl1271 *wl)
{
struct wl18xx_priv *priv = wl->priv;
int ret;
+ BUILD_BUG_ON(WL18XX_MAX_LINKS > WLCORE_MAX_LINKS);
+ BUILD_BUG_ON(WL18XX_MAX_AP_STATIONS > WL18XX_MAX_LINKS);
+
wl->rtable = wl18xx_rtable;
wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
wl->num_rx_desc = WL18XX_NUM_RX_DESCRIPTORS;
- wl->num_channels = 2;
+ wl->num_links = WL18XX_MAX_LINKS;
+ wl->max_ap_stations = WL18XX_MAX_AP_STATIONS;
+ wl->iface_combinations = wl18xx_iface_combinations;
+ wl->n_iface_combinations = ARRAY_SIZE(wl18xx_iface_combinations);
wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
+ wl->fw_status_len = sizeof(struct wl18xx_fw_status);
wl->fw_status_priv_len = sizeof(struct wl18xx_fw_status_priv);
wl->stats.fw_stats_len = sizeof(struct wl18xx_acx_statistics);
wl->static_data_priv_len = sizeof(struct wl18xx_static_data_priv);
diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c
index 57c6943..be1ebd5 100644
--- a/drivers/net/wireless/ti/wl18xx/tx.c
+++ b/drivers/net/wireless/ti/wl18xx/tx.c
@@ -32,7 +32,7 @@ static
void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
struct ieee80211_tx_rate *rate)
{
- u8 fw_rate = wl->fw_status_2->counters.tx_last_rate;
+ u8 fw_rate = wl->fw_status->counters.tx_last_rate;
if (fw_rate > CONF_HW_RATE_INDEX_MAX) {
wl1271_error("last Tx rate invalid: %d", fw_rate);
@@ -139,7 +139,7 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
void wl18xx_tx_immediate_complete(struct wl1271 *wl)
{
struct wl18xx_fw_status_priv *status_priv =
- (struct wl18xx_fw_status_priv *)wl->fw_status_2->priv;
+ (struct wl18xx_fw_status_priv *)wl->fw_status->priv;
struct wl18xx_priv *priv = wl->priv;
u8 i;
diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h
index 9204e07..eb7cfe8 100644
--- a/drivers/net/wireless/ti/wl18xx/wl18xx.h
+++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h
@@ -26,10 +26,10 @@
/* minimum FW required for driver */
#define WL18XX_CHIP_VER 8
-#define WL18XX_IFTYPE_VER 5
+#define WL18XX_IFTYPE_VER 8
#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE
#define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE
-#define WL18XX_MINOR_VER 39
+#define WL18XX_MINOR_VER 13
#define WL18XX_CMD_MAX_SIZE 740
@@ -40,7 +40,10 @@
#define WL18XX_NUM_MAC_ADDRESSES 3
-#define WL18XX_RX_BA_MAX_SESSIONS 5
+#define WL18XX_RX_BA_MAX_SESSIONS 13
+
+#define WL18XX_MAX_AP_STATIONS 10
+#define WL18XX_MAX_LINKS 16
struct wl18xx_priv {
/* buffer for sending commands to FW */
@@ -109,6 +112,59 @@ struct wl18xx_fw_status_priv {
u8 padding[3];
};
+struct wl18xx_fw_packet_counters {
+ /* Cumulative counter of released packets per AC */
+ u8 tx_released_pkts[NUM_TX_QUEUES];
+
+ /* Cumulative counter of freed packets per HLID */
+ u8 tx_lnk_free_pkts[WL18XX_MAX_LINKS];
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
+
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+
+ u8 padding[2];
+} __packed;
+
+/* FW status registers */
+struct wl18xx_fw_status {
+ __le32 intr;
+ u8 fw_rx_counter;
+ u8 drv_rx_counter;
+ u8 reserved;
+ u8 tx_results_counter;
+ __le32 rx_pkt_descs[WL18XX_NUM_RX_DESCRIPTORS];
+
+ __le32 fw_localtime;
+
+ /*
+ * A bitmap (where each bit represents a single HLID)
+ * to indicate if the station is in PS mode.
+ */
+ __le32 link_ps_bitmap;
+
+ /*
+ * A bitmap (where each bit represents a single HLID) to indicate
+ * if the station is in Fast mode
+ */
+ __le32 link_fast_bitmap;
+
+ /* Cumulative counter of total released mem blocks since FW-reset */
+ __le32 total_released_blks;
+
+ /* Size (in Memory Blocks) of TX pool */
+ __le32 tx_total;
+
+ struct wl18xx_fw_packet_counters counters;
+
+ __le32 log_start_addr;
+
+ /* Private status to be used by the lower drivers */
+ struct wl18xx_fw_status_priv priv;
+} __packed;
+
#define WL18XX_PHY_VERSION_MAX_LEN 20
struct wl18xx_static_data_priv {
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index ec83675..b924cea 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -358,7 +358,8 @@ int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct acx_beacon_filter_option *beacon_filter = NULL;
int ret = 0;
- wl1271_debug(DEBUG_ACX, "acx beacon filter opt");
+ wl1271_debug(DEBUG_ACX, "acx beacon filter opt enable=%d",
+ enable_filter);
if (enable_filter &&
wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED)
@@ -1591,7 +1592,8 @@ out:
return ret;
}
-int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 *addr)
{
struct wl1271_acx_inconnection_sta *acx = NULL;
int ret;
@@ -1603,6 +1605,7 @@ int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr)
return -ENOMEM;
memcpy(acx->addr, addr, ETH_ALEN);
+ acx->role_id = wlvif->role_id;
ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
acx, sizeof(*acx));
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 6dcfad9..954d57e 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -824,7 +824,8 @@ struct wl1271_acx_inconnection_sta {
struct acx_header header;
u8 addr[ETH_ALEN];
- u8 padding1[2];
+ u8 role_id;
+ u8 padding;
} __packed;
/*
@@ -1118,7 +1119,8 @@ int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable);
int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif);
int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif);
-int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
+int wl1271_acx_set_inconnection_sta(struct wl1271 *wl,
+ struct wl12xx_vif *wlvif, u8 *addr);
int wl1271_acx_fm_coex(struct wl1271 *wl);
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
int wl12xx_acx_config_hangover(struct wl1271 *wl);
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 9b2ecf5..40dc30f 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -60,8 +60,8 @@ static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
u16 status;
u16 poll_count = 0;
- if (WARN_ON(wl->state == WLCORE_STATE_RESTARTING &&
- id != CMD_STOP_FWLOGGER))
+ if (unlikely(wl->state == WLCORE_STATE_RESTARTING &&
+ id != CMD_STOP_FWLOGGER))
return -EIO;
cmd = buf;
@@ -312,8 +312,8 @@ static int wlcore_get_new_session_id(struct wl1271 *wl, u8 hlid)
int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
unsigned long flags;
- u8 link = find_first_zero_bit(wl->links_map, WL12XX_MAX_LINKS);
- if (link >= WL12XX_MAX_LINKS)
+ u8 link = find_first_zero_bit(wl->links_map, wl->num_links);
+ if (link >= wl->num_links)
return -EBUSY;
wl->session_ids[link] = wlcore_get_new_session_id(wl, link);
@@ -324,9 +324,14 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
__set_bit(link, wlvif->links_map);
spin_unlock_irqrestore(&wl->wl_lock, flags);
- /* take the last "freed packets" value from the current FW status */
- wl->links[link].prev_freed_pkts =
- wl->fw_status_2->counters.tx_lnk_free_pkts[link];
+ /*
+	 * take the last "freed packets" value from the current FW status.
+	 * on recovery we might not have a fw_status yet, in which case
+	 * tx_lnk_free_pkts will be NULL, so check for it.
+ */
+ if (wl->fw_status->counters.tx_lnk_free_pkts)
+ wl->links[link].prev_freed_pkts =
+ wl->fw_status->counters.tx_lnk_free_pkts[link];
wl->links[link].wlvif = wlvif;
/*
@@ -1527,6 +1532,7 @@ int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
cmd->sp_len = sta->max_sp;
cmd->wmm = sta->wme ? 1 : 0;
cmd->session_id = wl->session_ids[hlid];
+ cmd->role_id = wlvif->role_id;
for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
if (sta->wme && (sta->uapsd_queues & BIT(i)))
@@ -1563,7 +1569,8 @@ out:
return ret;
}
-int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
+int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid)
{
struct wl12xx_cmd_remove_peer *cmd;
int ret;
@@ -1581,6 +1588,7 @@ int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid)
/* We never send a deauth, mac80211 is in charge of this */
cmd->reason_opcode = 0;
cmd->send_deauth_flag = 0;
+ cmd->role_id = wlvif->role_id;
ret = wl1271_cmd_send(wl, CMD_REMOVE_PEER, cmd, sizeof(*cmd), 0);
if (ret < 0) {
diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h
index 323d4a8..b084830 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.h
+++ b/drivers/net/wireless/ti/wlcore/cmd.h
@@ -88,7 +88,8 @@ int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
int wl12xx_croc(struct wl1271 *wl, u8 role_id);
int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta, u8 hlid);
-int wl12xx_cmd_remove_peer(struct wl1271 *wl, u8 hlid);
+int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
+ u8 hlid);
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
enum ieee80211_band band);
int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl);
@@ -206,7 +207,7 @@ enum cmd_templ {
#define WL1271_COMMAND_TIMEOUT 2000
#define WL1271_CMD_TEMPL_DFLT_SIZE 252
#define WL1271_CMD_TEMPL_MAX_SIZE 512
-#define WL1271_EVENT_TIMEOUT 1500
+#define WL1271_EVENT_TIMEOUT 5000
struct wl1271_cmd_header {
__le16 id;
@@ -594,6 +595,8 @@ struct wl12xx_cmd_add_peer {
u8 sp_len;
u8 wmm;
u8 session_id;
+ u8 role_id;
+ u8 padding[3];
} __packed;
struct wl12xx_cmd_remove_peer {
@@ -602,7 +605,7 @@ struct wl12xx_cmd_remove_peer {
u8 hlid;
u8 reason_opcode;
u8 send_deauth_flag;
- u8 padding1;
+ u8 role_id;
} __packed;
/*
diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c
index 8d3b349..1f9a360 100644
--- a/drivers/net/wireless/ti/wlcore/event.c
+++ b/drivers/net/wireless/ti/wlcore/event.c
@@ -67,7 +67,7 @@ static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
u8 hlid;
struct wl1271_link *lnk;
for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
- WL12XX_MAX_LINKS) {
+ wl->num_links) {
lnk = &wl->links[hlid];
if (!lnk->ba_bitmap)
continue;
@@ -172,7 +172,7 @@ static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
const u8 *addr;
int h;
- for_each_set_bit(h, &sta_bitmap, WL12XX_MAX_LINKS) {
+ for_each_set_bit(h, &sta_bitmap, wl->num_links) {
bool found = false;
/* find the ap vif connected to this sta */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
diff --git a/drivers/net/wireless/ti/wlcore/hw_ops.h b/drivers/net/wireless/ti/wlcore/hw_ops.h
index 51f8d63..1555ff9 100644
--- a/drivers/net/wireless/ti/wlcore/hw_ops.h
+++ b/drivers/net/wireless/ti/wlcore/hw_ops.h
@@ -106,6 +106,15 @@ wlcore_hw_init_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
return 0;
}
+static inline void
+wlcore_hw_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status)
+{
+ BUG_ON(!wl->ops->convert_fw_status);
+
+ wl->ops->convert_fw_status(wl, raw_fw_status, fw_status);
+}
+
static inline u32
wlcore_hw_sta_get_ap_rate_mask(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
diff --git a/drivers/net/wireless/ti/wlcore/init.c b/drivers/net/wireless/ti/wlcore/init.c
index 7699f9d..199e941 100644
--- a/drivers/net/wireless/ti/wlcore/init.c
+++ b/drivers/net/wireless/ti/wlcore/init.c
@@ -287,8 +287,8 @@ static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
if (ret < 0)
return ret;
- /* enable beacon filtering */
- ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
+ /* disable beacon filtering until we get the first beacon */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
if (ret < 0)
return ret;
@@ -462,7 +462,7 @@ int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
* If the basic rates contain OFDM rates, use OFDM only
* rates for unicast TX as well. Else use all supported rates.
*/
- if ((wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
+ if (wl->ofdm_only_ap && (wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
supported_rates = CONF_TX_OFDM_RATES;
else
supported_rates = CONF_TX_ENABLED_RATES;
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 07e3d6a..0305729 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -60,7 +60,9 @@ static inline int __must_check wlcore_raw_write(struct wl1271 *wl, int addr,
{
int ret;
- if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+ if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) ||
+ WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) &&
+ addr != HW_ACCESS_ELP_CTRL_REG)))
return -EIO;
ret = wl->if_ops->write(wl->dev, addr, buf, len, fixed);
@@ -76,7 +78,9 @@ static inline int __must_check wlcore_raw_read(struct wl1271 *wl, int addr,
{
int ret;
- if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags))
+ if (test_bit(WL1271_FLAG_IO_FAILED, &wl->flags) ||
+ WARN_ON((test_bit(WL1271_FLAG_IN_ELP, &wl->flags) &&
+ addr != HW_ACCESS_ELP_CTRL_REG)))
return -EIO;
ret = wl->if_ops->read(wl->dev, addr, buf, len, fixed);
diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
index b46b311..7aae5b3 100644
--- a/drivers/net/wireless/ti/wlcore/main.c
+++ b/drivers/net/wireless/ti/wlcore/main.c
@@ -345,24 +345,24 @@ static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
* Start high-level PS if the STA is asleep with enough blocks in FW.
* Make an exception if this is the only connected link. In this
* case FW-memory congestion is less of a problem.
- * Note that a single connected STA means 3 active links, since we must
- * account for the global and broadcast AP links. The "fw_ps" check
- * assures us the third link is a STA connected to the AP. Otherwise
- * the FW would not set the PSM bit.
+ * Note that a single connected STA means 2*ap_count + 1 active links,
+ * since we must account for the global and broadcast AP links
+ * for each AP. The "fw_ps" check assures us the other link is a STA
+ * connected to the AP. Otherwise the FW would not set the PSM bit.
*/
- else if (wl->active_link_count > 3 && fw_ps &&
+ else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
- struct wl_fw_status_2 *status)
+ struct wl_fw_status *status)
{
u32 cur_fw_ps_map;
u8 hlid;
- cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
+ cur_fw_ps_map = status->link_ps_bitmap;
if (wl->ap_fw_ps_map != cur_fw_ps_map) {
wl1271_debug(DEBUG_PSM,
"link ps prev 0x%x cur 0x%x changed 0x%x",
@@ -372,77 +372,73 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl,
wl->ap_fw_ps_map = cur_fw_ps_map;
}
- for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, WL12XX_MAX_LINKS)
+ for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
wl->links[hlid].allocated_pkts);
}
-static int wlcore_fw_status(struct wl1271 *wl,
- struct wl_fw_status_1 *status_1,
- struct wl_fw_status_2 *status_2)
+static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
struct wl12xx_vif *wlvif;
struct timespec ts;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
int i;
- size_t status_len;
int ret;
struct wl1271_link *lnk;
- status_len = WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
- sizeof(*status_2) + wl->fw_status_priv_len;
-
- ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR, status_1,
- status_len, false);
+ ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
+ wl->raw_fw_status,
+ wl->fw_status_len, false);
if (ret < 0)
return ret;
+ wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
+
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
- status_1->intr,
- status_1->fw_rx_counter,
- status_1->drv_rx_counter,
- status_1->tx_results_counter);
+ status->intr,
+ status->fw_rx_counter,
+ status->drv_rx_counter,
+ status->tx_results_counter);
for (i = 0; i < NUM_TX_QUEUES; i++) {
/* prevent wrap-around in freed-packets counter */
wl->tx_allocated_pkts[i] -=
- (status_2->counters.tx_released_pkts[i] -
+ (status->counters.tx_released_pkts[i] -
wl->tx_pkts_freed[i]) & 0xff;
- wl->tx_pkts_freed[i] = status_2->counters.tx_released_pkts[i];
+ wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
}
- for_each_set_bit(i, wl->links_map, WL12XX_MAX_LINKS) {
+ for_each_set_bit(i, wl->links_map, wl->num_links) {
u8 diff;
lnk = &wl->links[i];
/* prevent wrap-around in freed-packets counter */
- diff = (status_2->counters.tx_lnk_free_pkts[i] -
+ diff = (status->counters.tx_lnk_free_pkts[i] -
lnk->prev_freed_pkts) & 0xff;
if (diff == 0)
continue;
lnk->allocated_pkts -= diff;
- lnk->prev_freed_pkts = status_2->counters.tx_lnk_free_pkts[i];
+ lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
/* accumulate the prev_freed_pkts counter */
lnk->total_freed_pkts += diff;
}
/* prevent wrap-around in total blocks counter */
- if (likely(wl->tx_blocks_freed <=
- le32_to_cpu(status_2->total_released_blks)))
- freed_blocks = le32_to_cpu(status_2->total_released_blks) -
+ if (likely(wl->tx_blocks_freed <= status->total_released_blks))
+ freed_blocks = status->total_released_blks -
wl->tx_blocks_freed;
else
freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
- le32_to_cpu(status_2->total_released_blks);
+ status->total_released_blks;
- wl->tx_blocks_freed = le32_to_cpu(status_2->total_released_blks);
+ wl->tx_blocks_freed = status->total_released_blks;
wl->tx_allocated_blocks -= freed_blocks;
@@ -458,7 +454,7 @@ static int wlcore_fw_status(struct wl1271 *wl,
cancel_delayed_work(&wl->tx_watchdog_work);
}
- avail = le32_to_cpu(status_2->tx_total) - wl->tx_allocated_blocks;
+ avail = status->tx_total - wl->tx_allocated_blocks;
/*
* The FW might change the total number of TX memblocks before
@@ -477,15 +473,15 @@ static int wlcore_fw_status(struct wl1271 *wl,
/* for AP update num of allocated TX blocks per link and ps status */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
- wl12xx_irq_update_links_status(wl, wlvif, status_2);
+ wl12xx_irq_update_links_status(wl, wlvif, status);
}
/* update the host-chipset time offset */
getnstimeofday(&ts);
wl->time_offset = (timespec_to_ns(&ts) >> 10) -
- (s64)le32_to_cpu(status_2->fw_localtime);
+ (s64)(status->fw_localtime);
- wl->fw_fast_lnk_map = le32_to_cpu(status_2->link_fast_bitmap);
+ wl->fw_fast_lnk_map = status->link_fast_bitmap;
return 0;
}
@@ -549,13 +545,13 @@ static int wlcore_irq_locked(struct wl1271 *wl)
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
smp_mb__after_clear_bit();
- ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+ ret = wlcore_fw_status(wl, wl->fw_status);
if (ret < 0)
goto out;
wlcore_hw_tx_immediate_compl(wl);
- intr = le32_to_cpu(wl->fw_status_1->intr);
+ intr = wl->fw_status->intr;
intr &= WLCORE_ALL_INTR_MASK;
if (!intr) {
done = true;
@@ -584,7 +580,7 @@ static int wlcore_irq_locked(struct wl1271 *wl)
if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
- ret = wlcore_rx(wl, wl->fw_status_1);
+ ret = wlcore_rx(wl, wl->fw_status);
if (ret < 0)
goto out;
@@ -786,10 +782,11 @@ out:
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
- WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
-
/* Avoid a recursive recovery */
if (wl->state == WLCORE_STATE_ON) {
+ WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
+ &wl->flags));
+
wl->state = WLCORE_STATE_RESTARTING;
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
wl1271_ps_elp_wakeup(wl);
@@ -843,11 +840,11 @@ static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
wl12xx_cmd_stop_fwlog(wl);
/* Read the first memory block address */
- ret = wlcore_fw_status(wl, wl->fw_status_1, wl->fw_status_2);
+ ret = wlcore_fw_status(wl, wl->fw_status);
if (ret < 0)
goto out;
- addr = le32_to_cpu(wl->fw_status_2->log_start_addr);
+ addr = wl->fw_status->log_start_addr;
if (!addr)
goto out;
@@ -990,23 +987,23 @@ static int wlcore_fw_wakeup(struct wl1271 *wl)
static int wl1271_setup(struct wl1271 *wl)
{
- wl->fw_status_1 = kzalloc(WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc) +
- sizeof(*wl->fw_status_2) +
- wl->fw_status_priv_len, GFP_KERNEL);
- if (!wl->fw_status_1)
- return -ENOMEM;
+ wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
+ if (!wl->raw_fw_status)
+ goto err;
- wl->fw_status_2 = (struct wl_fw_status_2 *)
- (((u8 *) wl->fw_status_1) +
- WLCORE_FW_STATUS_1_LEN(wl->num_rx_desc));
+ wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
+ if (!wl->fw_status)
+ goto err;
wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
- if (!wl->tx_res_if) {
- kfree(wl->fw_status_1);
- return -ENOMEM;
- }
+ if (!wl->tx_res_if)
+ goto err;
return 0;
+err:
+ kfree(wl->fw_status);
+ kfree(wl->raw_fw_status);
+ return -ENOMEM;
}
static int wl12xx_set_power_on(struct wl1271 *wl)
@@ -1767,6 +1764,12 @@ static int wl1271_op_suspend(struct ieee80211_hw *hw,
flush_work(&wl->tx_work);
flush_delayed_work(&wl->elp_work);
+ /*
+	 * Cancel the watchdog even if the tx_flush above failed. We will
+	 * detect it on resume anyway.
+ */
+ cancel_delayed_work(&wl->tx_watchdog_work);
+
return 0;
}
@@ -1824,6 +1827,13 @@ static int wl1271_op_resume(struct ieee80211_hw *hw)
out:
wl->wow_enabled = false;
+
+ /*
+ * Set a flag to re-init the watchdog on the first Tx after resume.
+ * That way we avoid possible conditions where Tx-complete interrupts
+ * fail to arrive and we perform a spurious recovery.
+ */
+ set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
mutex_unlock(&wl->mutex);
return 0;
@@ -1914,6 +1924,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
memset(wl->links_map, 0, sizeof(wl->links_map));
memset(wl->roc_map, 0, sizeof(wl->roc_map));
memset(wl->session_ids, 0, sizeof(wl->session_ids));
+ memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
wl->active_sta_count = 0;
wl->active_link_count = 0;
@@ -1938,9 +1949,10 @@ static void wlcore_op_stop_locked(struct wl1271 *wl)
wl1271_debugfs_reset(wl);
- kfree(wl->fw_status_1);
- wl->fw_status_1 = NULL;
- wl->fw_status_2 = NULL;
+ kfree(wl->raw_fw_status);
+ wl->raw_fw_status = NULL;
+ kfree(wl->fw_status);
+ wl->fw_status = NULL;
kfree(wl->tx_res_if);
wl->tx_res_if = NULL;
kfree(wl->target_mem_map);
@@ -2571,10 +2583,8 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl,
ieee80211_scan_completed(wl->hw, true);
}
- if (wl->sched_vif == wlvif) {
- ieee80211_sched_scan_stopped(wl->hw);
+ if (wl->sched_vif == wlvif)
wl->sched_vif = NULL;
- }
if (wl->roc_vif == vif) {
wl->roc_vif = NULL;
@@ -2931,6 +2941,11 @@ static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
return ret;
+
+ /* disable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
+ if (ret < 0)
+ return ret;
}
if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
@@ -3463,6 +3478,10 @@ static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
key_idx);
+ /* we don't handle unsetting of default key */
+ if (key_idx == -1)
+ return;
+
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
@@ -4298,6 +4317,13 @@ static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
}
}
+ if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
+ /* enable beacon filtering */
+ ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
+ if (ret < 0)
+ goto out;
+ }
+
ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
@@ -4651,7 +4677,7 @@ static int wl1271_allocate_sta(struct wl1271 *wl,
int ret;
- if (wl->active_sta_count >= AP_MAX_STATIONS) {
+ if (wl->active_sta_count >= wl->max_ap_stations) {
		wl1271_warning("could not allocate HLID - too many stations");
return -EBUSY;
}
@@ -4754,7 +4780,7 @@ static int wl12xx_sta_remove(struct wl1271 *wl,
if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
return -EINVAL;
- ret = wl12xx_cmd_remove_peer(wl, wl_sta->hlid);
+ ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
if (ret < 0)
return ret;
@@ -5679,28 +5705,6 @@ static void wl1271_unregister_hw(struct wl1271 *wl)
}
-static const struct ieee80211_iface_limit wlcore_iface_limits[] = {
- {
- .max = 3,
- .types = BIT(NL80211_IFTYPE_STATION),
- },
- {
- .max = 1,
- .types = BIT(NL80211_IFTYPE_AP) |
- BIT(NL80211_IFTYPE_P2P_GO) |
- BIT(NL80211_IFTYPE_P2P_CLIENT),
- },
-};
-
-static struct ieee80211_iface_combination
-wlcore_iface_combinations[] = {
- {
- .max_interfaces = 3,
- .limits = wlcore_iface_limits,
- .n_limits = ARRAY_SIZE(wlcore_iface_limits),
- },
-};
-
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
int i;
@@ -5733,7 +5737,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
IEEE80211_HW_AP_LINK_PS |
IEEE80211_HW_AMPDU_AGGREGATION |
IEEE80211_HW_TX_AMPDU_SETUP_IN_HW |
- IEEE80211_HW_QUEUE_CONTROL;
+ IEEE80211_HW_QUEUE_CONTROL |
+ IEEE80211_HW_CHANCTX_STA_CSA;
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
@@ -5821,10 +5826,8 @@ static int wl1271_init_ieee80211(struct wl1271 *wl)
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
/* allowed interface combinations */
- wlcore_iface_combinations[0].num_different_channels = wl->num_channels;
- wl->hw->wiphy->iface_combinations = wlcore_iface_combinations;
- wl->hw->wiphy->n_iface_combinations =
- ARRAY_SIZE(wlcore_iface_combinations);
+ wl->hw->wiphy->iface_combinations = wl->iface_combinations;
+ wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
SET_IEEE80211_DEV(wl->hw, wl->dev);
@@ -5844,8 +5847,6 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
int i, j, ret;
unsigned int order;
- BUILD_BUG_ON(AP_MAX_STATIONS > WL12XX_MAX_LINKS);
-
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
wl1271_error("could not alloc ieee80211_hw");
@@ -5867,8 +5868,12 @@ struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
wl->hw = hw;
+ /*
+	 * wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
+	 * we don't allocate any additional resources here, so that's fine.
+ */
for (i = 0; i < NUM_TX_QUEUES; i++)
- for (j = 0; j < WL12XX_MAX_LINKS; j++)
+ for (j = 0; j < WLCORE_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
skb_queue_head_init(&wl->deferred_rx_queue);
@@ -6011,7 +6016,8 @@ int wlcore_free_hw(struct wl1271 *wl)
kfree(wl->nvs);
wl->nvs = NULL;
- kfree(wl->fw_status_1);
+ kfree(wl->raw_fw_status);
+ kfree(wl->fw_status);
kfree(wl->tx_res_if);
destroy_workqueue(wl->freezable_wq);
diff --git a/drivers/net/wireless/ti/wlcore/ps.c b/drivers/net/wireless/ti/wlcore/ps.c
index 26bfc36..b52516e 100644
--- a/drivers/net/wireless/ti/wlcore/ps.c
+++ b/drivers/net/wireless/ti/wlcore/ps.c
@@ -280,7 +280,11 @@ void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
- if (test_bit(hlid, &wl->ap_ps_map))
+ if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
+ return;
+
+ if (!test_bit(hlid, wlvif->ap.sta_hlid_map) ||
+ test_bit(hlid, &wl->ap_ps_map))
return;
wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d "
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 6791a1a..e125974 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -203,9 +203,9 @@ static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
return is_data;
}
-int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status)
{
- unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
+ unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
u32 buf_size;
u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc;
u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc;
@@ -263,12 +263,12 @@ int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status)
wl->aggr_buf + pkt_offset,
pkt_len, rx_align,
&hlid) == 1) {
- if (hlid < WL12XX_MAX_LINKS)
+ if (hlid < wl->num_links)
__set_bit(hlid, active_hlids);
else
WARN(1,
- "hlid exceeded WL12XX_MAX_LINKS "
- "(%d)\n", hlid);
+ "hlid (%d) exceeded MAX_LINKS\n",
+ hlid);
}
wl->rx_counter++;
@@ -302,7 +302,7 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
{
int ret;
- if (wl->rx_filter_enabled[index] == enable) {
+ if (!!test_bit(index, wl->rx_filter_enabled) == enable) {
wl1271_warning("Request to enable an already "
"enabled rx filter %d", index);
return 0;
@@ -316,7 +316,10 @@ int wl1271_rx_filter_enable(struct wl1271 *wl,
return ret;
}
- wl->rx_filter_enabled[index] = enable;
+ if (enable)
+ __set_bit(index, wl->rx_filter_enabled);
+ else
+ __clear_bit(index, wl->rx_filter_enabled);
return 0;
}
@@ -326,7 +329,7 @@ int wl1271_rx_filter_clear_all(struct wl1271 *wl)
int i, ret = 0;
for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
- if (!wl->rx_filter_enabled[i])
+ if (!test_bit(i, wl->rx_filter_enabled))
continue;
ret = wl1271_rx_filter_enable(wl, i, 0, NULL);
if (ret)
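/*
 * Minimal standalone sketch of the bool-array to bitmap conversion done in
 * the hunks above: one bit per RX filter instead of one bool per filter.
 * Plain-C illustration only; the names and the filter count are stand-ins,
 * not the driver's (the driver itself uses test_bit/__set_bit/__clear_bit
 * on wl->rx_filter_enabled).
 */
#include <stdbool.h>

#define EXAMPLE_MAX_RX_FILTERS	5	/* assumed small constant */

static unsigned long example_rx_filter_bits;	/* replaces bool enabled[EXAMPLE_MAX_RX_FILTERS] */

static void example_set_rx_filter(int index, bool enable)
{
	if (enable)
		example_rx_filter_bits |= 1UL << index;
	else
		example_rx_filter_bits &= ~(1UL << index);
}

static bool example_rx_filter_is_enabled(int index)
{
	return !!(example_rx_filter_bits & (1UL << index));
}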
diff --git a/drivers/net/wireless/ti/wlcore/rx.h b/drivers/net/wireless/ti/wlcore/rx.h
index 3363f60..a3b1618 100644
--- a/drivers/net/wireless/ti/wlcore/rx.h
+++ b/drivers/net/wireless/ti/wlcore/rx.h
@@ -142,7 +142,7 @@ struct wl1271_rx_descriptor {
u8 reserved;
} __packed;
-int wlcore_rx(struct wl1271 *wl, struct wl_fw_status_1 *status);
+int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status);
u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
int wl1271_rx_filter_enable(struct wl1271 *wl,
int index, bool enable,
diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
index 87cd707..40b4311 100644
--- a/drivers/net/wireless/ti/wlcore/tx.c
+++ b/drivers/net/wireless/ti/wlcore/tx.c
@@ -101,7 +101,7 @@ static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
* authentication response. this way it won't get de-authed by FW
* when transmitting too soon.
*/
- wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
+ wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);
/*
* ROC for 1 second on the AP channel for completing the connection.
@@ -134,12 +134,12 @@ static void wl1271_tx_regulate_link(struct wl1271 *wl,
* into high-level PS and clean out its TX queues.
* Make an exception if this is the only connected link. In this
* case FW-memory congestion is less of a problem.
- * Note that a single connected STA means 3 active links, since we must
- * account for the global and broadcast AP links. The "fw_ps" check
- * assures us the third link is a STA connected to the AP. Otherwise
- * the FW would not set the PSM bit.
+ * Note that a single connected STA means 2*ap_count + 1 active links,
+ * since we must account for the global and broadcast AP links
+ * for each AP. The "fw_ps" check assures us the other link is a STA
+ * connected to the AP. Otherwise the FW would not set the PSM bit.
*/
- if (wl->active_link_count > 3 && fw_ps &&
+ if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
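/*
 * Worked example for the "2*ap_count + 1" threshold used above (illustration
 * only, not part of the patch): each AP role owns a global and a broadcast
 * link, so with ap_count == 1 a single associated STA already accounts for
 * 2*1 + 1 = 3 active links, and PS regulation only kicks in once a second
 * peer pushes active_link_count past that value.  Hypothetical helper
 * spelling out the same arithmetic:
 */
static inline int example_min_links_before_ps(int ap_count)
{
	/* one global + one broadcast link per AP, plus the single STA */
	return 2 * ap_count + 1;
}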
@@ -234,8 +234,13 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
wl->tx_blocks_available -= total_blocks;
wl->tx_allocated_blocks += total_blocks;
- /* If the FW was empty before, arm the Tx watchdog */
- if (wl->tx_allocated_blocks == total_blocks)
+ /*
+ * If the FW was empty before, arm the Tx watchdog. Also do
+ * this on the first Tx after resume, as we always cancel the
+ * watchdog on suspend.
+ */
+ if (wl->tx_allocated_blocks == total_blocks ||
+ test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
wl12xx_rearm_tx_watchdog_locked(wl);
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
@@ -357,6 +362,10 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
ieee80211_has_protected(frame_control))
tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
+ /* send EAPOL frames as voice */
+ if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
+ tx_attr |= TX_HW_ATTR_EAPOL_FRAME;
+
desc->tx_attr = cpu_to_le16(tx_attr);
wlcore_hw_set_tx_desc_csum(wl, desc, skb);
@@ -560,11 +569,11 @@ static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
int i, h, start_hlid;
/* start from the link after the last one */
- start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
+ start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;
/* dequeue according to AC, round robin on each link */
- for (i = 0; i < WL12XX_MAX_LINKS; i++) {
- h = (start_hlid + i) % WL12XX_MAX_LINKS;
+ for (i = 0; i < wl->num_links; i++) {
+ h = (start_hlid + i) % wl->num_links;
/* only consider connected stations */
if (!test_bit(h, wlvif->links_map))
@@ -688,8 +697,8 @@ static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
/* make sure we dequeue the same packet next time */
- wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
- WL12XX_MAX_LINKS;
+ wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
+ wl->num_links;
}
spin_lock_irqsave(&wl->wl_lock, flags);
@@ -722,7 +731,7 @@ void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
timeout = wl->conf.rx_streaming.duration;
wl12xx_for_each_wlvif_sta(wl, wlvif) {
bool found = false;
- for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
+ for_each_set_bit(hlid, active_hlids, wl->num_links) {
if (test_bit(hlid, wlvif->links_map)) {
found = true;
break;
@@ -759,7 +768,7 @@ int wlcore_tx_work_locked(struct wl1271 *wl)
struct wl1271_tx_hw_descr *desc;
u32 buf_offset = 0, last_len = 0;
bool sent_packets = false;
- unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
+ unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
int ret = 0;
int bus_ret = 0;
u8 hlid;
@@ -1061,7 +1070,7 @@ void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
int i;
/* TX failure */
- for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
+ for_each_set_bit(i, wlvif->links_map, wl->num_links) {
if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
/* this calls wl12xx_free_link */
@@ -1085,7 +1094,7 @@ void wl12xx_tx_reset(struct wl1271 *wl)
/* only reset the queues if something bad happened */
if (wl1271_tx_total_queue_count(wl) != 0) {
- for (i = 0; i < WL12XX_MAX_LINKS; i++)
+ for (i = 0; i < wl->num_links; i++)
wl1271_tx_reset_link_queues(wl, i);
for (i = 0; i < NUM_TX_QUEUES; i++)
@@ -1178,7 +1187,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
WL1271_TX_FLUSH_TIMEOUT / 1000);
/* forcibly flush all Tx buffers on our queues */
- for (i = 0; i < WL12XX_MAX_LINKS; i++)
+ for (i = 0; i < wl->num_links; i++)
wl1271_tx_reset_link_queues(wl, i);
out_wake:
diff --git a/drivers/net/wireless/ti/wlcore/tx.h b/drivers/net/wireless/ti/wlcore/tx.h
index 35489c3..79cb3ff 100644
--- a/drivers/net/wireless/ti/wlcore/tx.h
+++ b/drivers/net/wireless/ti/wlcore/tx.h
@@ -37,6 +37,7 @@
#define TX_HW_ATTR_TX_CMPLT_REQ BIT(12)
#define TX_HW_ATTR_TX_DUMMY_REQ BIT(13)
#define TX_HW_ATTR_HOST_ENCRYPT BIT(14)
+#define TX_HW_ATTR_EAPOL_FRAME BIT(15)
#define TX_HW_ATTR_OFST_SAVE_RETRIES 0
#define TX_HW_ATTR_OFST_HEADER_PAD 1
diff --git a/drivers/net/wireless/ti/wlcore/wlcore.h b/drivers/net/wireless/ti/wlcore/wlcore.h
index 06efc12..95a5450 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore.h
@@ -73,6 +73,8 @@ struct wlcore_ops {
void (*tx_immediate_compl)(struct wl1271 *wl);
int (*hw_init)(struct wl1271 *wl);
int (*init_vif)(struct wl1271 *wl, struct wl12xx_vif *wlvif);
+ void (*convert_fw_status)(struct wl1271 *wl, void *raw_fw_status,
+ struct wl_fw_status *fw_status);
u32 (*sta_get_ap_rate_mask)(struct wl1271 *wl,
struct wl12xx_vif *wlvif);
int (*get_pg_ver)(struct wl1271 *wl, s8 *ver);
@@ -220,7 +222,7 @@ struct wl1271 {
int channel;
u8 system_hlid;
- unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+ unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)];
unsigned long roles_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
unsigned long roc_map[BITS_TO_LONGS(WL12XX_MAX_ROLES)];
unsigned long rate_policies_map[
@@ -228,7 +230,7 @@ struct wl1271 {
unsigned long klv_templates_map[
BITS_TO_LONGS(WLCORE_MAX_KLV_TEMPLATES)];
- u8 session_ids[WL12XX_MAX_LINKS];
+ u8 session_ids[WLCORE_MAX_LINKS];
struct list_head wlvif_list;
@@ -346,8 +348,8 @@ struct wl1271 {
u32 buffer_cmd;
u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
- struct wl_fw_status_1 *fw_status_1;
- struct wl_fw_status_2 *fw_status_2;
+ void *raw_fw_status;
+ struct wl_fw_status *fw_status;
struct wl1271_tx_hw_res_if *tx_res_if;
/* Current chipset configuration */
@@ -376,7 +378,7 @@ struct wl1271 {
* AP-mode - links indexed by HLID. The global and broadcast links
* are always active.
*/
- struct wl1271_link links[WL12XX_MAX_LINKS];
+ struct wl1271_link links[WLCORE_MAX_LINKS];
/* number of currently active links */
int active_link_count;
@@ -405,6 +407,9 @@ struct wl1271 {
/* AP-mode - number of currently connected stations */
int active_sta_count;
+ /* Flag determining whether AP should broadcast OFDM-only rates */
+ bool ofdm_only_ap;
+
/* last wlvif we transmitted from */
struct wl12xx_vif *last_wlvif;
@@ -434,6 +439,10 @@ struct wl1271 {
u32 num_tx_desc;
/* number of RX descriptors the HW supports. */
u32 num_rx_desc;
+ /* number of links the HW supports */
+ u8 num_links;
+ /* max stations a single AP can support */
+ u8 max_ap_stations;
/* translate HW Tx rates to standard rate-indices */
const u8 **band_rate_to_idx;
@@ -448,10 +457,11 @@ struct wl1271 {
struct ieee80211_sta_ht_cap ht_cap[WLCORE_NUM_BANDS];
/* size of the private FW status data */
+ size_t fw_status_len;
size_t fw_status_priv_len;
/* RX Data filter rule state - enabled/disabled */
- bool rx_filter_enabled[WL1271_MAX_RX_FILTERS];
+ unsigned long rx_filter_enabled[BITS_TO_LONGS(WL1271_MAX_RX_FILTERS)];
/* size of the private static data */
size_t static_data_priv_len;
@@ -476,8 +486,9 @@ struct wl1271 {
struct completion nvs_loading_complete;
- /* number of concurrent channels the HW supports */
- u32 num_channels;
+ /* interface combinations supported by the hw */
+ const struct ieee80211_iface_combination *iface_combinations;
+ u8 n_iface_combinations;
};
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev);
diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h
index ce7261c..756e890 100644
--- a/drivers/net/wireless/ti/wlcore/wlcore_i.h
+++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h
@@ -58,10 +58,15 @@
#define WL1271_DEFAULT_DTIM_PERIOD 1
#define WL12XX_MAX_ROLES 4
-#define WL12XX_MAX_LINKS 12
#define WL12XX_INVALID_ROLE_ID 0xff
#define WL12XX_INVALID_LINK_ID 0xff
+/*
+ * maximum number of links allowed across all supported HWs.
+ * this is NOT the actual number of links supported by the current hw.
+ */
+#define WLCORE_MAX_LINKS 16
+
/* the driver supports the 2.4Ghz and 5Ghz bands */
#define WLCORE_NUM_BANDS 2
@@ -118,72 +123,58 @@ struct wl1271_chip {
#define NUM_TX_QUEUES 4
-#define AP_MAX_STATIONS 8
-
-struct wl_fw_packet_counters {
- /* Cumulative counter of released packets per AC */
- u8 tx_released_pkts[NUM_TX_QUEUES];
-
- /* Cumulative counter of freed packets per HLID */
- u8 tx_lnk_free_pkts[WL12XX_MAX_LINKS];
-
- /* Cumulative counter of released Voice memory blocks */
- u8 tx_voice_released_blks;
-
- /* Tx rate of the last transmitted packet */
- u8 tx_last_rate;
-
- u8 padding[2];
-} __packed;
-
-/* FW status registers */
-struct wl_fw_status_1 {
- __le32 intr;
+struct wl_fw_status {
+ u32 intr;
u8 fw_rx_counter;
u8 drv_rx_counter;
- u8 reserved;
u8 tx_results_counter;
- __le32 rx_pkt_descs[0];
-} __packed;
-
-/*
- * Each HW arch has a different number of Rx descriptors.
- * The length of the status depends on it, since it holds an array
- * of descriptors.
- */
-#define WLCORE_FW_STATUS_1_LEN(num_rx_desc) \
- (sizeof(struct wl_fw_status_1) + \
- (sizeof(((struct wl_fw_status_1 *)0)->rx_pkt_descs[0])) * \
- num_rx_desc)
+ __le32 *rx_pkt_descs;
-struct wl_fw_status_2 {
- __le32 fw_localtime;
+ u32 fw_localtime;
/*
* A bitmap (where each bit represents a single HLID)
* to indicate if the station is in PS mode.
*/
- __le32 link_ps_bitmap;
+ u32 link_ps_bitmap;
/*
* A bitmap (where each bit represents a single HLID) to indicate
* if the station is in Fast mode
*/
- __le32 link_fast_bitmap;
+ u32 link_fast_bitmap;
/* Cumulative counter of total released mem blocks since FW-reset */
- __le32 total_released_blks;
+ u32 total_released_blks;
/* Size (in Memory Blocks) of TX pool */
- __le32 tx_total;
+ u32 tx_total;
+
+ struct {
+ /*
+ * Cumulative counter of released packets per AC
+ * (length of the array is NUM_TX_QUEUES)
+ */
+ u8 *tx_released_pkts;
- struct wl_fw_packet_counters counters;
+ /*
+ * Cumulative counter of freed packets per HLID
+ * (length of the array is wl->num_links)
+ */
+ u8 *tx_lnk_free_pkts;
+
+ /* Cumulative counter of released Voice memory blocks */
+ u8 tx_voice_released_blks;
- __le32 log_start_addr;
+ /* Tx rate of the last transmitted packet */
+ u8 tx_last_rate;
+ } counters;
+
+ u32 log_start_addr;
/* Private status to be used by the lower drivers */
- u8 priv[0];
-} __packed;
+ void *priv;
+};
#define WL1271_MAX_CHANNELS 64
struct wl1271_scan {
@@ -240,6 +231,7 @@ enum wl12xx_flags {
WL1271_FLAG_VIF_CHANGE_IN_PROGRESS,
WL1271_FLAG_INTENDED_FW_RECOVERY,
WL1271_FLAG_IO_FAILED,
+ WL1271_FLAG_REINIT_TX_WDOG,
};
enum wl12xx_vif_flags {
@@ -368,7 +360,7 @@ struct wl12xx_vif {
/* HLIDs bitmap of associated stations */
unsigned long sta_hlid_map[BITS_TO_LONGS(
- WL12XX_MAX_LINKS)];
+ WLCORE_MAX_LINKS)];
/* recorded keys - set here before AP startup */
struct wl1271_ap_key *recorded_keys[MAX_NUM_KEYS];
@@ -385,7 +377,7 @@ struct wl12xx_vif {
/* counters of packets per AC, across all links in the vif */
int tx_queue_count[NUM_TX_QUEUES];
- unsigned long links_map[BITS_TO_LONGS(WL12XX_MAX_LINKS)];
+ unsigned long links_map[BITS_TO_LONGS(WLCORE_MAX_LINKS)];
u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
u8 ssid_len;
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index e30d800..a38f03d 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1282,16 +1282,10 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
np->rx_refill_timer.function = rx_refill_timeout;
err = -ENOMEM;
- np->stats = alloc_percpu(struct netfront_stats);
+ np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
if (np->stats == NULL)
goto exit;
- for_each_possible_cpu(i) {
- struct netfront_stats *xen_nf_stats;
- xen_nf_stats = per_cpu_ptr(np->stats, i);
- u64_stats_init(&xen_nf_stats->syncp);
- }
-
/* Initialise tx_skbs as a free chain containing every entry. */
np->tx_skb_freelist = 0;
for (i = 0; i < NET_TX_RING_SIZE; i++) {
diff --git a/drivers/of/of_net.c b/drivers/of/of_net.c
index a208a45..84215c1 100644
--- a/drivers/of/of_net.c
+++ b/drivers/of/of_net.c
@@ -12,28 +12,6 @@
#include <linux/export.h>
/**
- * It maps 'enum phy_interface_t' found in include/linux/phy.h
- * into the device tree binding of 'phy-mode', so that Ethernet
- * device driver can get phy interface from device tree.
- */
-static const char *phy_modes[] = {
- [PHY_INTERFACE_MODE_NA] = "",
- [PHY_INTERFACE_MODE_MII] = "mii",
- [PHY_INTERFACE_MODE_GMII] = "gmii",
- [PHY_INTERFACE_MODE_SGMII] = "sgmii",
- [PHY_INTERFACE_MODE_TBI] = "tbi",
- [PHY_INTERFACE_MODE_REVMII] = "rev-mii",
- [PHY_INTERFACE_MODE_RMII] = "rmii",
- [PHY_INTERFACE_MODE_RGMII] = "rgmii",
- [PHY_INTERFACE_MODE_RGMII_ID] = "rgmii-id",
- [PHY_INTERFACE_MODE_RGMII_RXID] = "rgmii-rxid",
- [PHY_INTERFACE_MODE_RGMII_TXID] = "rgmii-txid",
- [PHY_INTERFACE_MODE_RTBI] = "rtbi",
- [PHY_INTERFACE_MODE_SMII] = "smii",
- [PHY_INTERFACE_MODE_XGMII] = "xgmii",
-};
-
-/**
* of_get_phy_mode - Get phy mode for given device_node
* @np: Pointer to the given device_node
*
@@ -49,8 +27,8 @@ int of_get_phy_mode(struct device_node *np)
if (err < 0)
return err;
- for (i = 0; i < ARRAY_SIZE(phy_modes); i++)
- if (!strcasecmp(pm, phy_modes[i]))
+ for (i = 0; i < PHY_INTERFACE_MODE_MAX; i++)
+ if (!strcasecmp(pm, phy_modes(i)))
return i;
return -ENODEV;
diff --git a/drivers/staging/rtl8821ae/rc.c b/drivers/staging/rtl8821ae/rc.c
index d387f13..0cc32c6 100644
--- a/drivers/staging/rtl8821ae/rc.c
+++ b/drivers/staging/rtl8821ae/rc.c
@@ -286,7 +286,6 @@ static void rtl_rate_free_sta(void *rtlpriv,
}
static struct rate_control_ops rtl_rate_ops = {
- .module = NULL,
.name = "rtl_rc",
.alloc = rtl_rate_alloc,
.free = rtl_rate_free,
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 677b4f0..6f76277 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -13,10 +13,17 @@
#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM57780 0x03625d90
+#define PHY_ID_BCM7366 0x600d8490
+#define PHY_ID_BCM7439 0x600d8480
+#define PHY_ID_BCM7445 0x600d8510
+#define PHY_ID_BCM7XXX_28 0x600d8400
+
#define PHY_BCM_OUI_MASK 0xfffffc00
#define PHY_BCM_OUI_1 0x00206000
#define PHY_BCM_OUI_2 0x0143bc00
#define PHY_BCM_OUI_3 0x03625c00
+#define PHY_BCM_OUI_4 0x600d0000
+#define PHY_BCM_OUI_5 0x03625e00
#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
@@ -31,6 +38,59 @@
#define PHY_BRCM_EXT_IBND_TX_ENABLE 0x00002000
#define PHY_BRCM_CLEAR_RGMII_MODE 0x00004000
#define PHY_BRCM_DIS_TXCRXC_NOENRGY 0x00008000
+/* Broadcom BCM7xxx specific workarounds */
+#define PHY_BRCM_100MBPS_WAR 0x00010000
#define PHY_BCM_FLAGS_VALID 0x80000000
+/* Broadcom BCM54XX register definitions, common to most Broadcom PHYs */
+#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
+#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
+#define MII_BCM54XX_ECR_IF 0x0800 /* Interrupt force */
+
+#define MII_BCM54XX_ESR 0x11 /* BCM54xx extended status register */
+#define MII_BCM54XX_ESR_IS 0x1000 /* Interrupt status */
+
+#define MII_BCM54XX_EXP_DATA 0x15 /* Expansion register data */
+#define MII_BCM54XX_EXP_SEL 0x17 /* Expansion register select */
+#define MII_BCM54XX_EXP_SEL_SSD 0x0e00 /* Secondary SerDes select */
+#define MII_BCM54XX_EXP_SEL_ER 0x0f00 /* Expansion register select */
+
+#define MII_BCM54XX_AUX_CTL 0x18 /* Auxiliary control register */
+#define MII_BCM54XX_ISR 0x1a /* BCM54xx interrupt status register */
+#define MII_BCM54XX_IMR 0x1b /* BCM54xx interrupt mask register */
+#define MII_BCM54XX_INT_CRCERR 0x0001 /* CRC error */
+#define MII_BCM54XX_INT_LINK 0x0002 /* Link status changed */
+#define MII_BCM54XX_INT_SPEED 0x0004 /* Link speed change */
+#define MII_BCM54XX_INT_DUPLEX 0x0008 /* Duplex mode changed */
+#define MII_BCM54XX_INT_LRS 0x0010 /* Local receiver status changed */
+#define MII_BCM54XX_INT_RRS 0x0020 /* Remote receiver status changed */
+#define MII_BCM54XX_INT_SSERR 0x0040 /* Scrambler synchronization error */
+#define MII_BCM54XX_INT_UHCD 0x0080 /* Unsupported HCD negotiated */
+#define MII_BCM54XX_INT_NHCD 0x0100 /* No HCD */
+#define MII_BCM54XX_INT_NHCDL 0x0200 /* No HCD link */
+#define MII_BCM54XX_INT_ANPR 0x0400 /* Auto-negotiation page received */
+#define MII_BCM54XX_INT_LC 0x0800 /* All counters below 128 */
+#define MII_BCM54XX_INT_HC 0x1000 /* Counter above 32768 */
+#define MII_BCM54XX_INT_MDIX 0x2000 /* MDIX status change */
+#define MII_BCM54XX_INT_PSERR 0x4000 /* Pair swap error */
+
+#define MII_BCM54XX_SHD 0x1c /* 0x1c shadow registers */
+#define MII_BCM54XX_SHD_WRITE 0x8000
+#define MII_BCM54XX_SHD_VAL(x) ((x & 0x1f) << 10)
+#define MII_BCM54XX_SHD_DATA(x) ((x & 0x3ff) << 0)
+
+/*
+ * AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
+ */
+#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
+#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
+#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
+
+#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
+#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
+#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
+
+#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
+
#endif /* _LINUX_BRCMPHY_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index fb0ab65..dc5f902 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -124,6 +124,8 @@ unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx);
void can_free_echo_skb(struct net_device *dev, unsigned int idx);
struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf);
+struct sk_buff *alloc_canfd_skb(struct net_device *dev,
+ struct canfd_frame **cfd);
struct sk_buff *alloc_can_err_skb(struct net_device *dev,
struct can_frame **cf);
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index c8e3e7e3..0a114d0 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -183,6 +183,9 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings)
* hold the RTNL lock.
*
* See the structures used by these operations for further documentation.
+ * Note that for all operations using a structure ending with a zero-
+ * length array, the array is allocated separately in the kernel and
+ * is passed to the driver as an additional parameter.
*
* See &struct net_device and &struct net_device_ops for documentation
* of the generic netdev features interface.
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index e526a8c..5f34935 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -597,6 +597,20 @@ static inline int ieee80211_is_qos_nullfunc(__le16 fc)
}
/**
+ * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU
+ * @fc: frame control field in little-endian byteorder
+ */
+static inline bool ieee80211_is_bufferable_mmpdu(__le16 fc)
+{
+ /* IEEE 802.11-2012, definition of "bufferable management frame";
+ * note that this ignores the IBSS special case. */
+ return ieee80211_is_mgmt(fc) &&
+ (ieee80211_is_action(fc) ||
+ ieee80211_is_disassoc(fc) ||
+ ieee80211_is_deauth(fc));
+}
+
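/*
 * Standalone sketch of the frame-control test the new helper performs, with
 * the (well-known) FC bit-field constants spelled out so the layout is
 * visible.  The values mirror include/linux/ieee80211.h; the function and
 * macro names here are local stand-ins, and fc is assumed to already be in
 * host byte order.
 */
#include <stdbool.h>
#include <stdint.h>

#define EX_FCTL_FTYPE		0x000c
#define EX_FCTL_STYPE		0x00f0
#define EX_FTYPE_MGMT		0x0000
#define EX_STYPE_DISASSOC	0x00a0
#define EX_STYPE_DEAUTH		0x00c0
#define EX_STYPE_ACTION		0x00d0

static bool ex_is_bufferable_mmpdu(uint16_t fc)
{
	if ((fc & EX_FCTL_FTYPE) != EX_FTYPE_MGMT)
		return false;

	switch (fc & EX_FCTL_STYPE) {
	case EX_STYPE_ACTION:
	case EX_STYPE_DISASSOC:
	case EX_STYPE_DEAUTH:
		return true;
	default:
		return false;
	}
}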
+/**
* ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set
* @seq_ctrl: frame sequence control bytes in little-endian byteorder
*/
@@ -1636,51 +1650,22 @@ enum ieee80211_reasoncode {
enum ieee80211_eid {
WLAN_EID_SSID = 0,
WLAN_EID_SUPP_RATES = 1,
- WLAN_EID_FH_PARAMS = 2,
+ WLAN_EID_FH_PARAMS = 2, /* reserved now */
WLAN_EID_DS_PARAMS = 3,
WLAN_EID_CF_PARAMS = 4,
WLAN_EID_TIM = 5,
WLAN_EID_IBSS_PARAMS = 6,
- WLAN_EID_CHALLENGE = 16,
-
WLAN_EID_COUNTRY = 7,
WLAN_EID_HP_PARAMS = 8,
WLAN_EID_HP_TABLE = 9,
WLAN_EID_REQUEST = 10,
-
WLAN_EID_QBSS_LOAD = 11,
WLAN_EID_EDCA_PARAM_SET = 12,
WLAN_EID_TSPEC = 13,
WLAN_EID_TCLAS = 14,
WLAN_EID_SCHEDULE = 15,
- WLAN_EID_TS_DELAY = 43,
- WLAN_EID_TCLAS_PROCESSING = 44,
- WLAN_EID_QOS_CAPA = 46,
- /* 802.11z */
- WLAN_EID_LINK_ID = 101,
- /* 802.11s */
- WLAN_EID_MESH_CONFIG = 113,
- WLAN_EID_MESH_ID = 114,
- WLAN_EID_LINK_METRIC_REPORT = 115,
- WLAN_EID_CONGESTION_NOTIFICATION = 116,
- WLAN_EID_PEER_MGMT = 117,
- WLAN_EID_CHAN_SWITCH_PARAM = 118,
- WLAN_EID_MESH_AWAKE_WINDOW = 119,
- WLAN_EID_BEACON_TIMING = 120,
- WLAN_EID_MCCAOP_SETUP_REQ = 121,
- WLAN_EID_MCCAOP_SETUP_RESP = 122,
- WLAN_EID_MCCAOP_ADVERT = 123,
- WLAN_EID_MCCAOP_TEARDOWN = 124,
- WLAN_EID_GANN = 125,
- WLAN_EID_RANN = 126,
- WLAN_EID_PREQ = 130,
- WLAN_EID_PREP = 131,
- WLAN_EID_PERR = 132,
- WLAN_EID_PXU = 137,
- WLAN_EID_PXUC = 138,
- WLAN_EID_AUTH_MESH_PEER_EXCH = 139,
- WLAN_EID_MIC = 140,
-
+ WLAN_EID_CHALLENGE = 16,
+ /* 17-31 reserved for challenge text extension */
WLAN_EID_PWR_CONSTRAINT = 32,
WLAN_EID_PWR_CAPABILITY = 33,
WLAN_EID_TPC_REQUEST = 34,
@@ -1691,66 +1676,114 @@ enum ieee80211_eid {
WLAN_EID_MEASURE_REPORT = 39,
WLAN_EID_QUIET = 40,
WLAN_EID_IBSS_DFS = 41,
-
WLAN_EID_ERP_INFO = 42,
- WLAN_EID_EXT_SUPP_RATES = 50,
-
+ WLAN_EID_TS_DELAY = 43,
+ WLAN_EID_TCLAS_PROCESSING = 44,
WLAN_EID_HT_CAPABILITY = 45,
- WLAN_EID_HT_OPERATION = 61,
- WLAN_EID_SECONDARY_CHANNEL_OFFSET = 62,
-
+ WLAN_EID_QOS_CAPA = 46,
+ /* 47 reserved for Broadcom */
WLAN_EID_RSN = 48,
- WLAN_EID_MMIE = 76,
- WLAN_EID_VENDOR_SPECIFIC = 221,
- WLAN_EID_QOS_PARAMETER = 222,
-
+ WLAN_EID_802_15_COEX = 49,
+ WLAN_EID_EXT_SUPP_RATES = 50,
WLAN_EID_AP_CHAN_REPORT = 51,
WLAN_EID_NEIGHBOR_REPORT = 52,
WLAN_EID_RCPI = 53,
+ WLAN_EID_MOBILITY_DOMAIN = 54,
+ WLAN_EID_FAST_BSS_TRANSITION = 55,
+ WLAN_EID_TIMEOUT_INTERVAL = 56,
+ WLAN_EID_RIC_DATA = 57,
+ WLAN_EID_DSE_REGISTERED_LOCATION = 58,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59,
+ WLAN_EID_EXT_CHANSWITCH_ANN = 60,
+ WLAN_EID_HT_OPERATION = 61,
+ WLAN_EID_SECONDARY_CHANNEL_OFFSET = 62,
WLAN_EID_BSS_AVG_ACCESS_DELAY = 63,
WLAN_EID_ANTENNA_INFO = 64,
WLAN_EID_RSNI = 65,
WLAN_EID_MEASUREMENT_PILOT_TX_INFO = 66,
WLAN_EID_BSS_AVAILABLE_CAPACITY = 67,
WLAN_EID_BSS_AC_ACCESS_DELAY = 68,
+ WLAN_EID_TIME_ADVERTISEMENT = 69,
WLAN_EID_RRM_ENABLED_CAPABILITIES = 70,
WLAN_EID_MULTIPLE_BSSID = 71,
WLAN_EID_BSS_COEX_2040 = 72,
WLAN_EID_OVERLAP_BSS_SCAN_PARAM = 74,
- WLAN_EID_EXT_CAPABILITY = 127,
-
- WLAN_EID_MOBILITY_DOMAIN = 54,
- WLAN_EID_FAST_BSS_TRANSITION = 55,
- WLAN_EID_TIMEOUT_INTERVAL = 56,
- WLAN_EID_RIC_DATA = 57,
WLAN_EID_RIC_DESCRIPTOR = 75,
-
- WLAN_EID_DSE_REGISTERED_LOCATION = 58,
- WLAN_EID_SUPPORTED_REGULATORY_CLASSES = 59,
- WLAN_EID_EXT_CHANSWITCH_ANN = 60,
-
- WLAN_EID_VHT_CAPABILITY = 191,
- WLAN_EID_VHT_OPERATION = 192,
- WLAN_EID_OPMODE_NOTIF = 199,
- WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
- WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
- WLAN_EID_EXTENDED_BSS_LOAD = 193,
- WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
- WLAN_EID_AID = 197,
- WLAN_EID_QUIET_CHANNEL = 198,
-
- /* 802.11ad */
+ WLAN_EID_MMIE = 76,
+ WLAN_EID_ASSOC_COMEBACK_TIME = 77,
+ WLAN_EID_EVENT_REQUEST = 78,
+ WLAN_EID_EVENT_REPORT = 79,
+ WLAN_EID_DIAGNOSTIC_REQUEST = 80,
+ WLAN_EID_DIAGNOSTIC_REPORT = 81,
+ WLAN_EID_LOCATION_PARAMS = 82,
WLAN_EID_NON_TX_BSSID_CAP = 83,
+ WLAN_EID_SSID_LIST = 84,
+ WLAN_EID_MULTI_BSSID_IDX = 85,
+ WLAN_EID_FMS_DESCRIPTOR = 86,
+ WLAN_EID_FMS_REQUEST = 87,
+ WLAN_EID_FMS_RESPONSE = 88,
+ WLAN_EID_QOS_TRAFFIC_CAPA = 89,
+ WLAN_EID_BSS_MAX_IDLE_PERIOD = 90,
+ WLAN_EID_TSF_REQUEST = 91,
+ WLAN_EID_TSF_RESPOSNE = 92,
+ WLAN_EID_WNM_SLEEP_MODE = 93,
+ WLAN_EID_TIM_BCAST_REQ = 94,
+ WLAN_EID_TIM_BCAST_RESP = 95,
+ WLAN_EID_COLL_IF_REPORT = 96,
+ WLAN_EID_CHANNEL_USAGE = 97,
+ WLAN_EID_TIME_ZONE = 98,
+ WLAN_EID_DMS_REQUEST = 99,
+ WLAN_EID_DMS_RESPONSE = 100,
+ WLAN_EID_LINK_ID = 101,
+ WLAN_EID_WAKEUP_SCHEDUL = 102,
+ /* 103 reserved */
+ WLAN_EID_CHAN_SWITCH_TIMING = 104,
+ WLAN_EID_PTI_CONTROL = 105,
+ WLAN_EID_PU_BUFFER_STATUS = 106,
+ WLAN_EID_INTERWORKING = 107,
+ WLAN_EID_ADVERTISEMENT_PROTOCOL = 108,
+ WLAN_EID_EXPEDITED_BW_REQ = 109,
+ WLAN_EID_QOS_MAP_SET = 110,
+ WLAN_EID_ROAMING_CONSORTIUM = 111,
+ WLAN_EID_EMERGENCY_ALERT = 112,
+ WLAN_EID_MESH_CONFIG = 113,
+ WLAN_EID_MESH_ID = 114,
+ WLAN_EID_LINK_METRIC_REPORT = 115,
+ WLAN_EID_CONGESTION_NOTIFICATION = 116,
+ WLAN_EID_PEER_MGMT = 117,
+ WLAN_EID_CHAN_SWITCH_PARAM = 118,
+ WLAN_EID_MESH_AWAKE_WINDOW = 119,
+ WLAN_EID_BEACON_TIMING = 120,
+ WLAN_EID_MCCAOP_SETUP_REQ = 121,
+ WLAN_EID_MCCAOP_SETUP_RESP = 122,
+ WLAN_EID_MCCAOP_ADVERT = 123,
+ WLAN_EID_MCCAOP_TEARDOWN = 124,
+ WLAN_EID_GANN = 125,
+ WLAN_EID_RANN = 126,
+ WLAN_EID_EXT_CAPABILITY = 127,
+ /* 128, 129 reserved for Agere */
+ WLAN_EID_PREQ = 130,
+ WLAN_EID_PREP = 131,
+ WLAN_EID_PERR = 132,
+ /* 133-136 reserved for Cisco */
+ WLAN_EID_PXU = 137,
+ WLAN_EID_PXUC = 138,
+ WLAN_EID_AUTH_MESH_PEER_EXCH = 139,
+ WLAN_EID_MIC = 140,
+ WLAN_EID_DESTINATION_URI = 141,
+ WLAN_EID_UAPSD_COEX = 142,
WLAN_EID_WAKEUP_SCHEDULE = 143,
WLAN_EID_EXT_SCHEDULE = 144,
WLAN_EID_STA_AVAILABILITY = 145,
WLAN_EID_DMG_TSPEC = 146,
WLAN_EID_DMG_AT = 147,
WLAN_EID_DMG_CAP = 148,
+ /* 149-150 reserved for Cisco */
WLAN_EID_DMG_OPERATION = 151,
WLAN_EID_DMG_BSS_PARAM_CHANGE = 152,
WLAN_EID_DMG_BEAM_REFINEMENT = 153,
WLAN_EID_CHANNEL_MEASURE_FEEDBACK = 154,
+ /* 155-156 reserved for Cisco */
WLAN_EID_AWAKE_WINDOW = 157,
WLAN_EID_MULTI_BAND = 158,
WLAN_EID_ADDBA_EXT = 159,
@@ -1767,11 +1800,34 @@ enum ieee80211_eid {
WLAN_EID_MULTIPLE_MAC_ADDR = 170,
WLAN_EID_U_PID = 171,
WLAN_EID_DMG_LINK_ADAPT_ACK = 172,
+ /* 173 reserved for Symbol */
+ WLAN_EID_MCCAOP_ADV_OVERVIEW = 174,
WLAN_EID_QUIET_PERIOD_REQ = 175,
+ /* 176 reserved for Symbol */
WLAN_EID_QUIET_PERIOD_RESP = 177,
+ /* 178-179 reserved for Symbol */
+ /* 180 reserved for ISO/IEC 20011 */
WLAN_EID_EPAC_POLICY = 182,
WLAN_EID_CLISTER_TIME_OFF = 183,
+ WLAN_EID_INTER_AC_PRIO = 184,
+ WLAN_EID_SCS_DESCRIPTOR = 185,
+ WLAN_EID_QLOAD_REPORT = 186,
+ WLAN_EID_HCCA_TXOP_UPDATE_COUNT = 187,
+ WLAN_EID_HL_STREAM_ID = 188,
+ WLAN_EID_GCR_GROUP_ADDR = 189,
WLAN_EID_ANTENNA_SECTOR_ID_PATTERN = 190,
+ WLAN_EID_VHT_CAPABILITY = 191,
+ WLAN_EID_VHT_OPERATION = 192,
+ WLAN_EID_EXTENDED_BSS_LOAD = 193,
+ WLAN_EID_WIDE_BW_CHANNEL_SWITCH = 194,
+ WLAN_EID_VHT_TX_POWER_ENVELOPE = 195,
+ WLAN_EID_CHANNEL_SWITCH_WRAPPER = 196,
+ WLAN_EID_AID = 197,
+ WLAN_EID_QUIET_CHANNEL = 198,
+ WLAN_EID_OPMODE_NOTIF = 199,
+
+ WLAN_EID_VENDOR_SPECIFIC = 221,
+ WLAN_EID_QOS_PARAMETER = 222,
};
/* Action category code */
@@ -2192,10 +2248,10 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr)
}
/**
- * ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
+ * _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame
* @hdr: the frame (buffer must include at least the first octet of payload)
*/
-static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
+static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
{
if (ieee80211_is_disassoc(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control))
@@ -2224,6 +2280,17 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr)
}
/**
+ * ieee80211_is_robust_mgmt_frame - check if skb contains a robust mgmt frame
+ * @skb: the skb containing the frame, length will be checked
+ */
+static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb)
+{
+ if (skb->len < 25)
+ return false;
+ return _ieee80211_is_robust_mgmt_frame((void *)skb->data);
+}
+
+/**
* ieee80211_is_public_action - check if frame is a public action frame
* @hdr: the frame
* @len: length of the frame
diff --git a/include/linux/mlx4/driver.h b/include/linux/mlx4/driver.h
index c257e1b..022055c 100644
--- a/include/linux/mlx4/driver.h
+++ b/include/linux/mlx4/driver.h
@@ -64,4 +64,16 @@ void mlx4_unregister_interface(struct mlx4_interface *intf);
void *mlx4_get_protocol_dev(struct mlx4_dev *dev, enum mlx4_protocol proto, int port);
+static inline u64 mlx4_mac_to_u64(u8 *addr)
+{
+ u64 mac = 0;
+ int i;
+
+ for (i = 0; i < ETH_ALEN; i++) {
+ mac <<= 8;
+ mac |= addr[i];
+ }
+ return mac;
+}
+
#endif /* MLX4_DRIVER_H */
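For illustration, a minimal stand-alone sketch of the byte packing performed by the new mlx4_mac_to_u64() helper above; the sample address is made up and the sketch is not part of the patch.

	#include <stdio.h>
	#include <stdint.h>

	#define ETH_ALEN 6

	/* Fold the six MAC octets into the low 48 bits of a u64, MSB first,
	 * exactly like the mlx4_mac_to_u64() helper added above. */
	static uint64_t mac_to_u64(const uint8_t *addr)
	{
		uint64_t mac = 0;
		int i;

		for (i = 0; i < ETH_ALEN; i++) {
			mac <<= 8;
			mac |= addr[i];
		}
		return mac;
	}

	int main(void)
	{
		const uint8_t addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

		/* Prints 0x001122334455 */
		printf("0x%012llx\n", (unsigned long long)mac_to_u64(addr));
		return 0;
	}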
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index 59f8ba8..b66e761 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -270,9 +270,14 @@ enum {
struct mlx4_wqe_ctrl_seg {
__be32 owner_opcode;
- __be16 vlan_tag;
- u8 ins_vlan;
- u8 fence_size;
+ union {
+ struct {
+ __be16 vlan_tag;
+ u8 ins_vlan;
+ u8 fence_size;
+ };
+ __be32 bf_qpn;
+ };
/*
* High 24 bits are SRC remote buffer; low 8 bits are flags:
* [7] SO (strong ordering)
diff --git a/include/linux/mpls.h b/include/linux/mpls.h
new file mode 100644
index 0000000..9999145
--- /dev/null
+++ b/include/linux/mpls.h
@@ -0,0 +1,6 @@
+#ifndef _LINUX_MPLS_H
+#define _LINUX_MPLS_H
+
+#include <uapi/linux/mpls.h>
+
+#endif /* _LINUX_MPLS_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e8eeebd..1a86948 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1147,6 +1147,89 @@ struct net_device_ops {
void *priv);
};
+/**
+ * enum netdev_priv_flags - &struct net_device priv_flags
+ *
+ * These are the &struct net_device flags; they are only set internally
+ * by drivers and used in the kernel. These flags are invisible to
+ * userspace, which means that the order of these flags can change
+ * during any kernel release.
+ *
+ * You should have a pretty good reason to be extending these flags.
+ *
+ * @IFF_802_1Q_VLAN: 802.1Q VLAN device
+ * @IFF_EBRIDGE: Ethernet bridging device
+ * @IFF_SLAVE_INACTIVE: bonding slave not the curr. active
+ * @IFF_MASTER_8023AD: bonding master, 802.3ad
+ * @IFF_MASTER_ALB: bonding master, balance-alb
+ * @IFF_BONDING: bonding master or slave
+ * @IFF_SLAVE_NEEDARP: need ARPs for validation
+ * @IFF_ISATAP: ISATAP interface (RFC4214)
+ * @IFF_MASTER_ARPMON: bonding master, ARP mon in use
+ * @IFF_WAN_HDLC: WAN HDLC device
+ * @IFF_XMIT_DST_RELEASE: dev_hard_start_xmit() is allowed to
+ * release skb->dst
+ * @IFF_DONT_BRIDGE: disallow bridging this ether dev
+ * @IFF_DISABLE_NETPOLL: disable netpoll at run-time
+ * @IFF_MACVLAN_PORT: device used as macvlan port
+ * @IFF_BRIDGE_PORT: device used as bridge port
+ * @IFF_OVS_DATAPATH: device used as Open vSwitch datapath port
+ * @IFF_TX_SKB_SHARING: The interface supports sharing skbs on transmit
+ * @IFF_UNICAST_FLT: Supports unicast filtering
+ * @IFF_TEAM_PORT: device used as team port
+ * @IFF_SUPP_NOFCS: device supports sending custom FCS
+ * @IFF_LIVE_ADDR_CHANGE: device supports hardware address
+ * change when it's running
+ * @IFF_MACVLAN: Macvlan device
+ */
+enum netdev_priv_flags {
+ IFF_802_1Q_VLAN = 1<<0,
+ IFF_EBRIDGE = 1<<1,
+ IFF_SLAVE_INACTIVE = 1<<2,
+ IFF_MASTER_8023AD = 1<<3,
+ IFF_MASTER_ALB = 1<<4,
+ IFF_BONDING = 1<<5,
+ IFF_SLAVE_NEEDARP = 1<<6,
+ IFF_ISATAP = 1<<7,
+ IFF_MASTER_ARPMON = 1<<8,
+ IFF_WAN_HDLC = 1<<9,
+ IFF_XMIT_DST_RELEASE = 1<<10,
+ IFF_DONT_BRIDGE = 1<<11,
+ IFF_DISABLE_NETPOLL = 1<<12,
+ IFF_MACVLAN_PORT = 1<<13,
+ IFF_BRIDGE_PORT = 1<<14,
+ IFF_OVS_DATAPATH = 1<<15,
+ IFF_TX_SKB_SHARING = 1<<16,
+ IFF_UNICAST_FLT = 1<<17,
+ IFF_TEAM_PORT = 1<<18,
+ IFF_SUPP_NOFCS = 1<<19,
+ IFF_LIVE_ADDR_CHANGE = 1<<20,
+ IFF_MACVLAN = 1<<21,
+};
+
+#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN
+#define IFF_EBRIDGE IFF_EBRIDGE
+#define IFF_SLAVE_INACTIVE IFF_SLAVE_INACTIVE
+#define IFF_MASTER_8023AD IFF_MASTER_8023AD
+#define IFF_MASTER_ALB IFF_MASTER_ALB
+#define IFF_BONDING IFF_BONDING
+#define IFF_SLAVE_NEEDARP IFF_SLAVE_NEEDARP
+#define IFF_ISATAP IFF_ISATAP
+#define IFF_MASTER_ARPMON IFF_MASTER_ARPMON
+#define IFF_WAN_HDLC IFF_WAN_HDLC
+#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE
+#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE
+#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL
+#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT
+#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT
+#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH
+#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING
+#define IFF_UNICAST_FLT IFF_UNICAST_FLT
+#define IFF_TEAM_PORT IFF_TEAM_PORT
+#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS
+#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE
+#define IFF_MACVLAN IFF_MACVLAN
+
/*
* The DEVICE structure.
* Actually, this whole structure is a big mistake. It mixes I/O
@@ -1279,6 +1362,10 @@ struct net_device {
* that share the same link
* layer address
*/
+ unsigned short dev_port; /* Used to differentiate
+ * devices that share the same
+ * function
+ */
spinlock_t addr_list_lock;
struct netdev_hw_addr_list uc; /* Unicast mac addresses */
struct netdev_hw_addr_list mc; /* Multicast mac addresses */
@@ -1316,13 +1403,7 @@ struct net_device {
/*
* Cache lines mostly used on receive path (including eth_type_trans())
*/
- unsigned long last_rx; /* Time of last Rx
- * This should not be set in
- * drivers, unless really needed,
- * because network stack (bonding)
- * use it if/when necessary, to
- * avoid dirtying this cache line.
- */
+ unsigned long last_rx; /* Time of last Rx */
/* Interface address info used in eth_type_trans() */
unsigned char *dev_addr; /* hw address, (before bcast
@@ -1729,6 +1810,20 @@ struct pcpu_sw_netstats {
struct u64_stats_sync syncp;
};
+#define netdev_alloc_pcpu_stats(type) \
+({ \
+ typeof(type) *pcpu_stats = alloc_percpu(type); \
+ if (pcpu_stats) { \
+ int i; \
+ for_each_possible_cpu(i) { \
+ typeof(type) *stat; \
+ stat = per_cpu_ptr(pcpu_stats, i); \
+ u64_stats_init(&stat->syncp); \
+ } \
+ } \
+ pcpu_stats; \
+})
+
#include <linux/notifier.h>
/* netdevice notifier chain. Please remember to update the rtnetlink
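As a usage sketch (not part of the patch): a driver would typically call the new netdev_alloc_pcpu_stats() helper from its ndo_init callback and release the memory in ndo_uninit. The foo_* names and the foo_priv layout below are hypothetical; netdev_priv(), free_percpu() and struct pcpu_sw_netstats are existing kernel APIs.

	struct foo_priv {
		struct pcpu_sw_netstats __percpu *stats;
	};

	static int foo_init(struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		/* One struct pcpu_sw_netstats per possible CPU, each syncp
		 * already initialized by the macro above. */
		priv->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
		if (!priv->stats)
			return -ENOMEM;
		return 0;
	}

	static void foo_uninit(struct net_device *dev)
	{
		struct foo_priv *priv = netdev_priv(dev);

		free_percpu(priv->stats);
	}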
diff --git a/include/linux/nl802154.h b/include/linux/nl802154.h
index fd4f2d1..e110b8c 100644
--- a/include/linux/nl802154.h
+++ b/include/linux/nl802154.h
@@ -70,6 +70,16 @@ enum {
IEEE802154_ATTR_PHY_NAME,
IEEE802154_ATTR_DEV_TYPE,
+ IEEE802154_ATTR_TXPOWER,
+ IEEE802154_ATTR_LBT_ENABLED,
+ IEEE802154_ATTR_CCA_MODE,
+ IEEE802154_ATTR_CCA_ED_LEVEL,
+ IEEE802154_ATTR_CSMA_RETRIES,
+ IEEE802154_ATTR_CSMA_MIN_BE,
+ IEEE802154_ATTR_CSMA_MAX_BE,
+
+ IEEE802154_ATTR_FRAME_RETRIES,
+
__IEEE802154_ATTR_MAX,
};
@@ -122,6 +132,8 @@ enum {
IEEE802154_ADD_IFACE,
IEEE802154_DEL_IFACE,
+ IEEE802154_SET_PHYPARAMS,
+
__IEEE802154_CMD_MAX,
};
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 565188c..24126c4 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -74,8 +74,53 @@ typedef enum {
PHY_INTERFACE_MODE_RTBI,
PHY_INTERFACE_MODE_SMII,
PHY_INTERFACE_MODE_XGMII,
+ PHY_INTERFACE_MODE_MOCA,
+ PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
+/**
+ * Maps 'enum phy_interface_t' found in include/linux/phy.h into the
+ * device tree binding of 'phy-mode', so that Ethernet device drivers
+ * can get the PHY interface mode from the device tree.
+ */
+static inline const char *phy_modes(phy_interface_t interface)
+{
+ switch (interface) {
+ case PHY_INTERFACE_MODE_NA:
+ return "";
+ case PHY_INTERFACE_MODE_MII:
+ return "mii";
+ case PHY_INTERFACE_MODE_GMII:
+ return "gmii";
+ case PHY_INTERFACE_MODE_SGMII:
+ return "sgmii";
+ case PHY_INTERFACE_MODE_TBI:
+ return "tbi";
+ case PHY_INTERFACE_MODE_REVMII:
+ return "rev-mii";
+ case PHY_INTERFACE_MODE_RMII:
+ return "rmii";
+ case PHY_INTERFACE_MODE_RGMII:
+ return "rgmii";
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ return "rgmii-id";
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ return "rgmii-rxid";
+ case PHY_INTERFACE_MODE_RGMII_TXID:
+ return "rgmii-txid";
+ case PHY_INTERFACE_MODE_RTBI:
+ return "rtbi";
+ case PHY_INTERFACE_MODE_SMII:
+ return "smii";
+ case PHY_INTERFACE_MODE_XGMII:
+ return "xgmii";
+ case PHY_INTERFACE_MODE_MOCA:
+ return "moca";
+ default:
+ return "unknown";
+ }
+}
+
#define PHY_INIT_TIMEOUT 100000
#define PHY_STATE_TIME 1
@@ -308,6 +353,7 @@ struct phy_device {
struct phy_c45_device_ids c45_ids;
bool is_c45;
bool is_internal;
+ bool has_fixups;
enum phy_state state;
@@ -394,6 +440,11 @@ struct phy_driver {
u32 flags;
/*
+ * Called to issue a PHY software reset
+ */
+ int (*soft_reset)(struct phy_device *phydev);
+
+ /*
* Called to initialize the PHY,
* including after a reset
*/
@@ -417,6 +468,9 @@ struct phy_driver {
*/
int (*config_aneg)(struct phy_device *phydev);
+ /* Determines the auto negotiation result */
+ int (*aneg_done)(struct phy_device *phydev);
+
/* Determines the negotiated speed and duplex */
int (*read_status)(struct phy_device *phydev);
@@ -612,10 +666,12 @@ static inline int phy_read_status(struct phy_device *phydev)
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
int genphy_config_aneg(struct phy_device *phydev);
+int genphy_aneg_done(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
int genphy_read_status(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
+int genphy_soft_reset(struct phy_device *phydev);
void phy_driver_unregister(struct phy_driver *drv);
void phy_drivers_unregister(struct phy_driver *drv, int n);
int phy_driver_register(struct phy_driver *new_driver);
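As a usage sketch (not part of the patch): an Ethernet driver could use the new phy_modes() helper when logging which PHY interface mode it ended up with. The foo_* name is hypothetical; netdev_info(), phydev->addr and phydev->interface are existing APIs.

	static void foo_report_phy(struct net_device *dev,
				   struct phy_device *phydev)
	{
		/* phy_modes() returns the same string used by the 'phy-mode'
		 * device tree property, e.g. "rgmii-id" or "moca". */
		netdev_info(dev, "attached PHY at address %d in %s mode\n",
			    phydev->addr, phy_modes(phydev->interface));
	}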
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 5e1e6f2..03db95a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -32,6 +32,7 @@
#include <linux/hrtimer.h>
#include <linux/dma-mapping.h>
#include <linux/netdev_features.h>
+#include <linux/sched.h>
#include <net/flow_keys.h>
/* A. Checksumming of received packets by device.
@@ -356,11 +357,62 @@ typedef unsigned int sk_buff_data_t;
typedef unsigned char *sk_buff_data_t;
#endif
+/**
+ * struct skb_mstamp - multi resolution time stamps
+ * @stamp_us: timestamp in us resolution
+ * @stamp_jiffies: timestamp in jiffies
+ */
+struct skb_mstamp {
+ union {
+ u64 v64;
+ struct {
+ u32 stamp_us;
+ u32 stamp_jiffies;
+ };
+ };
+};
+
+/**
+ * skb_mstamp_get - get current timestamp
+ * @cl: place to store timestamps
+ */
+static inline void skb_mstamp_get(struct skb_mstamp *cl)
+{
+ u64 val = local_clock();
+
+ do_div(val, NSEC_PER_USEC);
+ cl->stamp_us = (u32)val;
+ cl->stamp_jiffies = (u32)jiffies;
+}
+
+/**
+ * skb_mstamp_us_delta - compute the difference in usec between two skb_mstamp
+ * @t1: pointer to newest sample
+ * @t0: pointer to oldest sample
+ */
+static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
+ const struct skb_mstamp *t0)
+{
+ s32 delta_us = t1->stamp_us - t0->stamp_us;
+ u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;
+
+ /* If delta_us is negative, this might be because the interval is too big,
+ * or because local_clock() drift is too big: fall back to jiffies.
+ */
+ if (delta_us <= 0 ||
+ delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
+
+ delta_us = jiffies_to_usecs(delta_jiffies);
+
+ return delta_us;
+}
+
+
/**
* struct sk_buff - socket buffer
* @next: Next buffer in list
* @prev: Previous buffer in list
- * @tstamp: Time we arrived
+ * @tstamp: Time we arrived/left
* @sk: Socket we are owned by
* @dev: Device we arrived on/are leaving by
* @cb: Control buffer. Free for use by every layer. Put private vars here
@@ -429,7 +481,10 @@ struct sk_buff {
struct sk_buff *next;
struct sk_buff *prev;
- ktime_t tstamp;
+ union {
+ ktime_t tstamp;
+ struct skb_mstamp skb_mstamp;
+ };
struct sock *sk;
struct net_device *dev;
@@ -691,6 +746,8 @@ struct sk_buff *skb_realloc_headroom(struct sk_buff *skb,
unsigned int headroom);
struct sk_buff *skb_copy_expand(const struct sk_buff *skb, int newheadroom,
int newtailroom, gfp_t priority);
+int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
+ int offset, int len);
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset,
int len);
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer);
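For illustration, a stand-alone sketch of the wrap-safe microsecond delta used by skb_mstamp_us_delta() above; the sample values are made up and the sketch is not part of the patch.

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t us_delta(uint32_t t1_us, uint32_t t0_us)
	{
		/* Unsigned 32-bit subtraction reinterpreted as signed stays
		 * correct across a single wrap of the timestamp, as in the
		 * helper above (which additionally falls back to jiffies when
		 * the result is <= 0). */
		int32_t delta = (int32_t)(t1_us - t0_us);

		return delta > 0 ? (uint32_t)delta : 0;
	}

	int main(void)
	{
		/* Two samples taken just before and just after a 32-bit wrap. */
		printf("%u\n", us_delta(0x00000005u, 0xfffffffbu));	/* prints 10 */
		return 0;
	}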
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 4ad0706..2399468 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -201,10 +201,10 @@ struct tcp_sock {
u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
/* RTT measurement */
- u32 srtt; /* smoothed round trip time << 3 */
- u32 mdev; /* medium deviation */
- u32 mdev_max; /* maximal mdev for the last rtt period */
- u32 rttvar; /* smoothed mdev_max */
+ u32 srtt_us; /* smoothed round trip time << 3 in usecs */
+ u32 mdev_us; /* medium deviation */
+ u32 mdev_max_us; /* maximal mdev for the last rtt period */
+ u32 rttvar_us; /* smoothed mdev_max */
u32 rtt_seq; /* sequence number to update rttvar */
u32 packets_out; /* Packets which are "in flight" */
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 788d837..3ee4c92 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -89,7 +89,7 @@ struct tc_action_ops {
struct module *owner;
int (*act)(struct sk_buff *, const struct tc_action *, struct tcf_result *);
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
- int (*cleanup)(struct tc_action *, int bind);
+ void (*cleanup)(struct tc_action *, int bind);
int (*lookup)(struct tc_action *, u32);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action *act, int ovr,
@@ -98,20 +98,18 @@ struct tc_action_ops {
};
int tcf_hash_search(struct tc_action *a, u32 index);
-void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo);
-int tcf_hash_release(struct tcf_common *p, int bind,
- struct tcf_hashinfo *hinfo);
+void tcf_hash_destroy(struct tc_action *a);
+int tcf_hash_release(struct tc_action *a, int bind);
u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
-struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a,
- int bind);
-struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
- struct tc_action *a, int size,
- int bind);
-void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo);
+int tcf_hash_check(u32 index, struct tc_action *a, int bind);
+int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
+ int size, int bind);
+void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
+void tcf_hash_insert(struct tc_action *a);
-int tcf_register_action(struct tc_action_ops *a);
+int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
int tcf_unregister_action(struct tc_action_ops *a);
-void tcf_action_destroy(struct list_head *actions, int bind);
+int tcf_action_destroy(struct list_head *actions, int bind);
int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
struct tcf_result *res);
int tcf_action_init(struct net *net, struct nlattr *nla,
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 50e39a8..933a9f2 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -314,7 +314,7 @@ static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
- __u64 *p = (__u64 *)addr;
+ __be64 *p = (__be64 *)addr;
return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(1))) == 0UL;
#else
return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
@@ -326,7 +326,7 @@ static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
static inline bool ipv6_addr_is_ll_all_routers(const struct in6_addr *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
- __u64 *p = (__u64 *)addr;
+ __be64 *p = (__be64 *)addr;
return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(2))) == 0UL;
#else
return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
@@ -343,7 +343,7 @@ static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr)
static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
- __u64 *p = (__u64 *)addr;
+ __be64 *p = (__be64 *)addr;
return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) |
((p[1] ^ cpu_to_be64(0x00000001ff000000UL)) &
cpu_to_be64(0xffffffffff000000UL))) == 0UL;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index b1f84b0..9f90554 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -1394,10 +1394,12 @@ struct cfg80211_scan_request {
/**
* struct cfg80211_match_set - sets of attributes to match
*
- * @ssid: SSID to be matched
+ * @ssid: SSID to be matched; may be zero-length for no match (RSSI only)
+ * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
*/
struct cfg80211_match_set {
struct cfg80211_ssid ssid;
+ s32 rssi_thold;
};
/**
@@ -1420,7 +1422,8 @@ struct cfg80211_match_set {
* @dev: the interface
* @scan_start: start time of the scheduled scan
* @channels: channels to scan
- * @rssi_thold: don't report scan results below this threshold (in s32 dBm)
+ * @min_rssi_thold: for drivers only supporting a single threshold, this
+ * contains the minimum over all matchsets
*/
struct cfg80211_sched_scan_request {
struct cfg80211_ssid *ssids;
@@ -1433,7 +1436,7 @@ struct cfg80211_sched_scan_request {
u32 flags;
struct cfg80211_match_set *match_sets;
int n_match_sets;
- s32 rssi_thold;
+ s32 min_rssi_thold;
/* internal */
struct wiphy *wiphy;
@@ -1701,8 +1704,14 @@ struct cfg80211_ibss_params {
*
* @channel: The channel to use or %NULL if not specified (auto-select based
* on scan results)
+ * @channel_hint: The channel of the recommended BSS for initial connection or
+ * %NULL if not specified
* @bssid: The AP BSSID or %NULL if not specified (auto-select based on scan
* results)
+ * @bssid_hint: The recommended AP BSSID for initial connection to the BSS or
+ * %NULL if not specified. Unlike the @bssid parameter, the driver is
+ * allowed to ignore this @bssid_hint if it has knowledge of a better BSS
+ * to use.
* @ssid: SSID
* @ssid_len: Length of ssid in octets
* @auth_type: Authentication type (algorithm)
@@ -1725,11 +1734,13 @@ struct cfg80211_ibss_params {
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
- u8 *bssid;
- u8 *ssid;
+ struct ieee80211_channel *channel_hint;
+ const u8 *bssid;
+ const u8 *bssid_hint;
+ const u8 *ssid;
size_t ssid_len;
enum nl80211_auth_type auth_type;
- u8 *ie;
+ const u8 *ie;
size_t ie_len;
bool privacy;
enum nl80211_mfp mfp;
@@ -1768,6 +1779,7 @@ struct cfg80211_bitrate_mask {
u32 legacy;
u8 ht_mcs[IEEE80211_HT_MCS_MASK_LEN];
u16 vht_mcs[NL80211_VHT_NSS_MAX];
+ enum nl80211_txrate_gi gi;
} control[IEEE80211_NUM_BANDS];
};
/**
@@ -2875,6 +2887,11 @@ struct wiphy_vendor_command {
* @n_vendor_commands: number of vendor commands
* @vendor_events: array of vendor events supported by the hardware
* @n_vendor_events: number of vendor events
+ *
+ * @max_ap_assoc_sta: maximum number of associated stations supported in AP mode
+ * (including P2P GO) or 0 to indicate no such limit is advertised. The
+ * driver is allowed to advertise a theoretical limit that it can reach in
+ * some cases, but may not always reach.
*/
struct wiphy {
/* assign these fields before you register the wiphy */
@@ -2990,6 +3007,8 @@ struct wiphy {
const struct nl80211_vendor_cmd_info *vendor_events;
int n_vendor_commands, n_vendor_events;
+ u16 max_ap_assoc_sta;
+
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -3127,8 +3146,8 @@ struct cfg80211_cached_keys;
* @identifier: (private) Identifier used in nl80211 to identify this
* wireless device if it has no netdev
* @current_bss: (private) Used by the internal configuration code
- * @channel: (private) Used by the internal configuration code to track
- * the user-set AP, monitor and WDS channel
+ * @chandef: (private) Used by the internal configuration code to track
+ * the user-set channel definition.
* @preset_chandef: (private) Used by the internal configuration code to
* track the channel to be used for AP later
* @bssid: (private) Used by the internal configuration code
@@ -3192,9 +3211,7 @@ struct wireless_dev {
struct cfg80211_internal_bss *current_bss; /* associated / joined */
struct cfg80211_chan_def preset_chandef;
-
- /* for AP and mesh channel tracking */
- struct ieee80211_channel *channel;
+ struct cfg80211_chan_def chandef;
bool ibss_fixed;
bool ibss_dfs_possible;
@@ -3876,6 +3893,7 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
*
* @dev: network device
* @bssid: the BSSID of the IBSS joined
+ * @channel: the channel of the IBSS joined
* @gfp: allocation flags
*
* This function notifies cfg80211 that the device joined an IBSS or
@@ -3885,7 +3903,8 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr,
* with the locally generated beacon -- this guarantees that there is
* always a scan result for this IBSS. cfg80211 will handle the rest.
*/
-void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp);
+void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+ struct ieee80211_channel *channel, gfp_t gfp);
/**
* cfg80211_notify_new_candidate - notify cfg80211 of a new mesh peer candidate
diff --git a/include/net/flow.h b/include/net/flow.h
index d23e7fa..bee3741 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -218,9 +218,10 @@ struct flow_cache_object *flow_cache_lookup(struct net *net,
const struct flowi *key, u16 family,
u8 dir, flow_resolve_t resolver,
void *ctx);
+int flow_cache_init(struct net *net);
-void flow_cache_flush(void);
-void flow_cache_flush_deferred(void);
+void flow_cache_flush(struct net *net);
+void flow_cache_flush_deferred(struct net *net);
extern atomic_t flow_cache_genid;
#endif
diff --git a/include/net/flowcache.h b/include/net/flowcache.h
new file mode 100644
index 0000000..c8f665e
--- /dev/null
+++ b/include/net/flowcache.h
@@ -0,0 +1,25 @@
+#ifndef _NET_FLOWCACHE_H
+#define _NET_FLOWCACHE_H
+
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/notifier.h>
+
+struct flow_cache_percpu {
+ struct hlist_head *hash_table;
+ int hash_count;
+ u32 hash_rnd;
+ int hash_rnd_recalc;
+ struct tasklet_struct flush_tasklet;
+};
+
+struct flow_cache {
+ u32 hash_shift;
+ struct flow_cache_percpu __percpu *percpu;
+ struct notifier_block hotcpu_notifier;
+ int low_watermark;
+ int high_watermark;
+ struct timer_list rnd_timer;
+};
+#endif /* _NET_FLOWCACHE_H */
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 8b5b714..b0fd947 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -316,6 +316,10 @@ enum ieee80211_radiotap_type {
#define IEEE80211_RADIOTAP_VHT_FLAG_LDPC_EXTRA_OFDM_SYM 0x10
#define IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED 0x20
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER0 0x01
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER1 0x02
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER2 0x04
+#define IEEE80211_RADIOTAP_CODING_LDPC_USER3 0x08
/* helpers */
static inline int ieee80211_get_radiotap_len(unsigned char *data)
diff --git a/include/net/ieee802154_netdev.h b/include/net/ieee802154_netdev.h
index 8196d5d..97b2e34 100644
--- a/include/net/ieee802154_netdev.h
+++ b/include/net/ieee802154_netdev.h
@@ -29,6 +29,12 @@
#include <net/af_ieee802154.h>
+struct ieee802154_frag_info {
+ __be16 d_tag;
+ u16 d_size;
+ u8 d_offset;
+};
+
/*
* A control block of skb passed between the ARPHRD_IEEE802154 device
* and other stack parts.
@@ -39,6 +45,7 @@ struct ieee802154_mac_cb {
struct ieee802154_addr da;
u8 flags;
u8 seq;
+ struct ieee802154_frag_info frag_info;
};
static inline struct ieee802154_mac_cb *mac_cb(struct sk_buff *skb)
diff --git a/include/net/ip.h b/include/net/ip.h
index 23be0fd..b885d75 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -266,7 +266,8 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst)
static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
- return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE;
+ return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
+ inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}
static inline bool ip_sk_use_pmtu(const struct sock *sk)
@@ -274,6 +275,12 @@ static inline bool ip_sk_use_pmtu(const struct sock *sk)
return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}
+static inline bool ip_sk_local_df(const struct sock *sk)
+{
+ return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
+ inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
+}
+
static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
bool forwarding)
{
@@ -489,7 +496,8 @@ int ip_options_rcv_srr(struct sk_buff *skb);
void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb);
-int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc);
+int ip_cmsg_send(struct net *net, struct msghdr *msg,
+ struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 017badb..00e3f12 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -171,7 +171,14 @@ static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
{
- return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE;
+ return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE &&
+ inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
+}
+
+static inline bool ip6_sk_local_df(const struct sock *sk)
+{
+ return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
+ inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
}
static inline struct in6_addr *rt6_nexthop(struct rt6_info *rt)
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index f4ab2fb..4f0f29d 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -808,9 +808,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
* @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index
* @RX_FLAG_40MHZ: HT40 (40 MHz) was used
- * @RX_FLAG_80MHZ: 80 MHz was used
- * @RX_FLAG_80P80MHZ: 80+80 MHz was used
- * @RX_FLAG_160MHZ: 160 MHz was used
* @RX_FLAG_SHORT_GI: Short guard interval was used
* @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
* Valid only for data frames (mainly A-MPDU)
@@ -830,6 +827,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* on this subframe
* @RX_FLAG_AMPDU_DELIM_CRC_KNOWN: The delimiter CRC field is known (the CRC
* is stored in the @ampdu_delimiter_crc field)
+ * @RX_FLAG_LDPC: LDPC was used
* @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
* @RX_FLAG_10MHZ: 10 MHz (half channel) was used
* @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
@@ -866,9 +864,7 @@ enum mac80211_rx_flags {
RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(20),
RX_FLAG_MACTIME_END = BIT(21),
RX_FLAG_VHT = BIT(22),
- RX_FLAG_80MHZ = BIT(23),
- RX_FLAG_80P80MHZ = BIT(24),
- RX_FLAG_160MHZ = BIT(25),
+ RX_FLAG_LDPC = BIT(23),
RX_FLAG_STBC_MASK = BIT(26) | BIT(27),
RX_FLAG_10MHZ = BIT(28),
RX_FLAG_5MHZ = BIT(29),
@@ -878,6 +874,21 @@ enum mac80211_rx_flags {
#define RX_FLAG_STBC_SHIFT 26
/**
+ * enum mac80211_rx_vht_flags - receive VHT flags
+ *
+ * These flags are used with the @vht_flag member of
+ * &struct ieee80211_rx_status.
+ * @RX_VHT_FLAG_80MHZ: 80 MHz was used
+ * @RX_VHT_FLAG_80P80MHZ: 80+80 MHz was used
+ * @RX_VHT_FLAG_160MHZ: 160 MHz was used
+ */
+enum mac80211_rx_vht_flags {
+ RX_VHT_FLAG_80MHZ = BIT(0),
+ RX_VHT_FLAG_80P80MHZ = BIT(1),
+ RX_VHT_FLAG_160MHZ = BIT(2),
+};
+
+/**
* struct ieee80211_rx_status - receive status
*
* The low-level driver should provide this information (the subset
@@ -902,26 +913,19 @@ enum mac80211_rx_flags {
* HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
* @vht_nss: number of streams (VHT only)
* @flag: %RX_FLAG_*
+ * @vht_flag: %RX_VHT_FLAG_*
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
* @ampdu_delimiter_crc: A-MPDU delimiter CRC
- * @vendor_radiotap_bitmap: radiotap vendor namespace presence bitmap
- * @vendor_radiotap_len: radiotap vendor namespace length
- * @vendor_radiotap_align: radiotap vendor namespace alignment. Note
- * that the actual data must be at the start of the SKB data
- * already.
- * @vendor_radiotap_oui: radiotap vendor namespace OUI
- * @vendor_radiotap_subns: radiotap vendor sub namespace
*/
struct ieee80211_rx_status {
u64 mactime;
u32 device_timestamp;
u32 ampdu_reference;
u32 flag;
- u32 vendor_radiotap_bitmap;
- u16 vendor_radiotap_len;
u16 freq;
+ u8 vht_flag;
u8 rate_idx;
u8 vht_nss;
u8 rx_flags;
@@ -931,9 +935,6 @@ struct ieee80211_rx_status {
u8 chains;
s8 chain_signal[IEEE80211_MAX_CHAINS];
u8 ampdu_delimiter_crc;
- u8 vendor_radiotap_align;
- u8 vendor_radiotap_oui[3];
- u8 vendor_radiotap_subns;
};
/**
@@ -2750,11 +2751,13 @@ enum ieee80211_roc_type {
* @channel_switch_beacon: Starts a channel switch to a new channel.
* Beacons are modified to include CSA or ECSA IEs before calling this
* function. The corresponding count fields in these IEs must be
- * decremented, and when they reach zero the driver must call
+ * decremented, and when they reach 1 the driver must call
* ieee80211_csa_finish(). Drivers which use ieee80211_beacon_get()
* get the csa counter decremented by mac80211, but must check if it is
- * zero using ieee80211_csa_is_complete() after the beacon has been
+ * 1 using ieee80211_csa_is_complete() after the beacon has been
* transmitted and then call ieee80211_csa_finish().
+ * If the CSA count starts as zero or 1, this function will not be called,
+ * since there won't be any time to beacon before the switch anyway.
*
* @join_ibss: Join an IBSS (on an IBSS interface); this is called after all
* information in bss_conf is set up and the beacon can be retrieved. A
@@ -3452,13 +3455,13 @@ static inline struct sk_buff *ieee80211_beacon_get(struct ieee80211_hw *hw,
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
* After a channel switch announcement was scheduled and the counter in this
- * announcement hit zero, this function must be called by the driver to
+ * announcement hits 1, this function must be called by the driver to
* notify mac80211 that the channel can be changed.
*/
void ieee80211_csa_finish(struct ieee80211_vif *vif);
/**
- * ieee80211_csa_is_complete - find out if counters reached zero
+ * ieee80211_csa_is_complete - find out if counters reached 1
* @vif: &struct ieee80211_vif pointer from the add_interface callback.
*
* This function returns whether the channel switch counters reached zero.
@@ -4451,7 +4454,6 @@ struct ieee80211_tx_rate_control {
};
struct rate_control_ops {
- struct module *module;
const char *name;
void *(*alloc)(struct ieee80211_hw *hw, struct dentry *debugfsdir);
void (*free)(void *priv);
@@ -4553,8 +4555,8 @@ int rate_control_set_rates(struct ieee80211_hw *hw,
struct ieee80211_sta *pubsta,
struct ieee80211_sta_rates *rates);
-int ieee80211_rate_control_register(struct rate_control_ops *ops);
-void ieee80211_rate_control_unregister(struct rate_control_ops *ops);
+int ieee80211_rate_control_register(const struct rate_control_ops *ops);
+void ieee80211_rate_control_unregister(const struct rate_control_ops *ops);
static inline bool
conf_is_ht20(struct ieee80211_conf *conf)
diff --git a/include/net/mac802154.h b/include/net/mac802154.h
index 807d6b7..8ca3d04 100644
--- a/include/net/mac802154.h
+++ b/include/net/mac802154.h
@@ -113,6 +113,32 @@ struct ieee802154_dev {
* Set radio for listening on specific address.
* Set the device for listening on specified address.
* Returns either zero, or negative errno.
+ *
+ * set_txpower:
+ * Set radio transmit power in dB. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_lbt:
+ * Enables or disables listen before talk on the device. Called with
+ * pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_cca_mode:
+ * Sets the CCA mode used by the device. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_cca_ed_level:
+ * Sets the CCA energy detection threshold in dBm. Called with pib_lock
+ * held.
+ * Returns either zero, or negative errno.
+ *
+ * set_csma_params:
+ * Sets the CSMA parameter set for the PHY. Called with pib_lock held.
+ * Returns either zero, or negative errno.
+ *
+ * set_frame_retries:
+ * Sets the retransmission attempt limit. Called with pib_lock held.
+ * Returns either zero, or negative errno.
*/
struct ieee802154_ops {
struct module *owner;
@@ -129,6 +155,15 @@ struct ieee802154_ops {
unsigned long changed);
int (*ieee_addr)(struct ieee802154_dev *dev,
u8 addr[IEEE802154_ADDR_LEN]);
+ int (*set_txpower)(struct ieee802154_dev *dev, int db);
+ int (*set_lbt)(struct ieee802154_dev *dev, bool on);
+ int (*set_cca_mode)(struct ieee802154_dev *dev, u8 mode);
+ int (*set_cca_ed_level)(struct ieee802154_dev *dev,
+ s32 level);
+ int (*set_csma_params)(struct ieee802154_dev *dev,
+ u8 min_be, u8 max_be, u8 retries);
+ int (*set_frame_retries)(struct ieee802154_dev *dev,
+ s8 retries);
};
/* Basic interface to register ieee802154 device */
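As a usage sketch (not part of the patch): a radio driver would advertise the new PHY-parameter hooks by filling them into its ieee802154_ops. The foo_* callbacks are hypothetical, and only the fields visible in this hunk are shown.

	static struct ieee802154_ops foo_ops = {
		.owner			= THIS_MODULE,
		/* ... existing transmit/start/stop/channel callbacks ... */
		.set_txpower		= foo_set_txpower,
		.set_lbt		= foo_set_lbt,
		.set_cca_mode		= foo_set_cca_mode,
		.set_cca_ed_level	= foo_set_cca_ed_level,
		.set_csma_params	= foo_set_csma_params,
		.set_frame_retries	= foo_set_frame_retries,
	};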
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 991dcd9..79387f7 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -15,6 +15,7 @@
#include <net/netns/packet.h>
#include <net/netns/ipv4.h>
#include <net/netns/ipv6.h>
+#include <net/netns/ieee802154_6lowpan.h>
#include <net/netns/sctp.h>
#include <net/netns/dccp.h>
#include <net/netns/netfilter.h>
@@ -90,6 +91,9 @@ struct net {
#if IS_ENABLED(CONFIG_IPV6)
struct netns_ipv6 ipv6;
#endif
+#if IS_ENABLED(CONFIG_IEEE802154_6LOWPAN)
+ struct netns_ieee802154_lowpan ieee802154_lowpan;
+#endif
#if defined(CONFIG_IP_SCTP) || defined(CONFIG_IP_SCTP_MODULE)
struct netns_sctp sctp;
#endif
diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h
new file mode 100644
index 0000000..079030c
--- /dev/null
+++ b/include/net/netns/ieee802154_6lowpan.h
@@ -0,0 +1,22 @@
+/*
+ * ieee802154 6lowpan in net namespaces
+ */
+
+#include <net/inet_frag.h>
+
+#ifndef __NETNS_IEEE802154_6LOWPAN_H__
+#define __NETNS_IEEE802154_6LOWPAN_H__
+
+struct netns_sysctl_lowpan {
+#ifdef CONFIG_SYSCTL
+ struct ctl_table_header *frags_hdr;
+#endif
+};
+
+struct netns_ieee802154_lowpan {
+ struct netns_sysctl_lowpan sysctl;
+ struct netns_frags frags;
+ u16 max_dsize;
+};
+
+#endif
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h
index 1006a26..51f0dce 100644
--- a/include/net/netns/xfrm.h
+++ b/include/net/netns/xfrm.h
@@ -6,6 +6,7 @@
#include <linux/workqueue.h>
#include <linux/xfrm.h>
#include <net/dst_ops.h>
+#include <net/flowcache.h>
struct ctl_table_header;
@@ -58,9 +59,18 @@ struct netns_xfrm {
struct dst_ops xfrm6_dst_ops;
#endif
spinlock_t xfrm_state_lock;
- spinlock_t xfrm_policy_sk_bundle_lock;
rwlock_t xfrm_policy_lock;
struct mutex xfrm_cfg_mutex;
+
+ /* flow cache part */
+ struct flow_cache flow_cache_global;
+ struct kmem_cache *flow_cachep;
+ atomic_t flow_cache_genid;
+ struct list_head flow_cache_gc_list;
+ spinlock_t flow_cache_gc_lock;
+ struct work_struct flow_cache_gc_work;
+ struct work_struct flow_cache_flush_work;
+ struct mutex flow_flush_sem;
};
#endif
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 661e45d..72240e5a 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -140,7 +140,7 @@ struct net_device *rtnl_create_link(struct net *net, char *ifname,
struct nlattr *tb[]);
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
-extern const struct nla_policy ifla_policy[IFLA_MAX+1];
+int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 9e8710b..fa8f5fa 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -9,7 +9,7 @@ struct tcf_csum {
u32 update_flags;
};
-#define to_tcf_csum(pc) \
- container_of(pc,struct tcf_csum,common)
+#define to_tcf_csum(a) \
+ container_of(a->priv,struct tcf_csum,common)
#endif /* __NET_TC_CSUM_H */
diff --git a/include/net/tc_act/tc_defact.h b/include/net/tc_act/tc_defact.h
index 65f024b..9763dcb 100644
--- a/include/net/tc_act/tc_defact.h
+++ b/include/net/tc_act/tc_defact.h
@@ -8,7 +8,7 @@ struct tcf_defact {
u32 tcfd_datalen;
void *tcfd_defdata;
};
-#define to_defact(pc) \
- container_of(pc, struct tcf_defact, common)
+#define to_defact(a) \
+ container_of(a->priv, struct tcf_defact, common)
#endif /* __NET_TC_DEF_H */
diff --git a/include/net/tc_act/tc_gact.h b/include/net/tc_act/tc_gact.h
index 9e3f676..9fc9b57 100644
--- a/include/net/tc_act/tc_gact.h
+++ b/include/net/tc_act/tc_gact.h
@@ -11,7 +11,7 @@ struct tcf_gact {
int tcfg_paction;
#endif
};
-#define to_gact(pc) \
- container_of(pc, struct tcf_gact, common)
+#define to_gact(a) \
+ container_of(a->priv, struct tcf_gact, common)
#endif /* __NET_TC_GACT_H */
diff --git a/include/net/tc_act/tc_ipt.h b/include/net/tc_act/tc_ipt.h
index f7d25df..c0f4193 100644
--- a/include/net/tc_act/tc_ipt.h
+++ b/include/net/tc_act/tc_ipt.h
@@ -11,7 +11,7 @@ struct tcf_ipt {
char *tcfi_tname;
struct xt_entry_target *tcfi_t;
};
-#define to_ipt(pc) \
- container_of(pc, struct tcf_ipt, common)
+#define to_ipt(a) \
+ container_of(a->priv, struct tcf_ipt, common)
#endif /* __NET_TC_IPT_H */
diff --git a/include/net/tc_act/tc_mirred.h b/include/net/tc_act/tc_mirred.h
index cfe2943..4dd77a1 100644
--- a/include/net/tc_act/tc_mirred.h
+++ b/include/net/tc_act/tc_mirred.h
@@ -11,7 +11,7 @@ struct tcf_mirred {
struct net_device *tcfm_dev;
struct list_head tcfm_list;
};
-#define to_mirred(pc) \
- container_of(pc, struct tcf_mirred, common)
+#define to_mirred(a) \
+ container_of(a->priv, struct tcf_mirred, common)
#endif /* __NET_TC_MIR_H */
diff --git a/include/net/tc_act/tc_nat.h b/include/net/tc_act/tc_nat.h
index 4a691f3..63d8e9c 100644
--- a/include/net/tc_act/tc_nat.h
+++ b/include/net/tc_act/tc_nat.h
@@ -13,9 +13,9 @@ struct tcf_nat {
u32 flags;
};
-static inline struct tcf_nat *to_tcf_nat(struct tcf_common *pc)
+static inline struct tcf_nat *to_tcf_nat(struct tc_action *a)
{
- return container_of(pc, struct tcf_nat, common);
+ return container_of(a->priv, struct tcf_nat, common);
}
#endif /* __NET_TC_NAT_H */
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index e6f6e15..5b80998 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -9,7 +9,7 @@ struct tcf_pedit {
unsigned char tcfp_flags;
struct tc_pedit_key *tcfp_keys;
};
-#define to_pedit(pc) \
- container_of(pc, struct tcf_pedit, common)
+#define to_pedit(a) \
+ container_of(a->priv, struct tcf_pedit, common)
#endif /* __NET_TC_PED_H */
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index dd5d86f..0df9a0d 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -29,7 +29,7 @@ struct tcf_skbedit {
u16 queue_mapping;
/* XXX: 16-bit pad here? */
};
-#define to_skbedit(pc) \
- container_of(pc, struct tcf_skbedit, common)
+#define to_skbedit(a) \
+ container_of(a->priv, struct tcf_skbedit, common)
#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 8c4dd63..bb253b9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -31,6 +31,7 @@
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
+#include <linux/ktime.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -478,7 +479,6 @@ int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES
-#include <linux/ktime.h>
/* Syncookies use a monotonic timer which increments every 64 seconds.
* This counter is used both as a hash input and partially encoded into
@@ -619,7 +619,7 @@ static inline void tcp_bound_rto(const struct sock *sk)
static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
- return (tp->srtt >> 3) + tp->rttvar;
+ return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
@@ -656,6 +656,11 @@ static inline u32 tcp_rto_min(struct sock *sk)
return rto_min;
}
+static inline u32 tcp_rto_min_us(struct sock *sk)
+{
+ return jiffies_to_usecs(tcp_rto_min(sk));
+}
+
/* Compute the actual receive window we are currently advertising.
* Rcv_nxt can be after the window if our peer push more data
* than the offered window.
@@ -778,7 +783,6 @@ enum tcp_ca_event {
#define TCP_CA_BUF_MAX (TCP_CA_NAME_MAX*TCP_CA_MAX)
#define TCP_CONG_NON_RESTRICTED 0x1
-#define TCP_CONG_RTT_STAMP 0x2
struct tcp_congestion_ops {
struct list_head list;
@@ -791,8 +795,6 @@ struct tcp_congestion_ops {
/* return slow start threshold (required) */
u32 (*ssthresh)(struct sock *sk);
- /* lower bound for congestion window (optional) */
- u32 (*min_cwnd)(const struct sock *sk);
/* do new cwnd calculation (required) */
void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
/* call before changing ca_state (optional) */
@@ -827,7 +829,6 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
extern struct tcp_congestion_ops tcp_init_congestion_ops;
u32 tcp_reno_ssthresh(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
-u32 tcp_reno_min_cwnd(const struct sock *sk);
extern struct tcp_congestion_ops tcp_reno;
static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
diff --git a/include/net/wpan-phy.h b/include/net/wpan-phy.h
index b52bda8..10ab0fc 100644
--- a/include/net/wpan-phy.h
+++ b/include/net/wpan-phy.h
@@ -37,15 +37,22 @@ struct wpan_phy {
struct mutex pib_lock;
/*
- * This is a PIB according to 802.15.4-2006.
+ * This is a PIB according to 802.15.4-2011.
* We do not provide timing-related variables, as they
* aren't used outside of driver
*/
u8 current_channel;
u8 current_page;
u32 channels_supported[32];
- u8 transmit_power;
+ s8 transmit_power;
u8 cca_mode;
+ u8 min_be;
+ u8 max_be;
+ u8 csma_retries;
+ s8 frame_retries;
+
+ bool lbt;
+ s32 cca_ed_level;
struct device dev;
int idx;
@@ -54,6 +61,14 @@ struct wpan_phy {
const char *name, int type);
void (*del_iface)(struct wpan_phy *phy, struct net_device *dev);
+ int (*set_txpower)(struct wpan_phy *phy, int db);
+ int (*set_lbt)(struct wpan_phy *phy, bool on);
+ int (*set_cca_mode)(struct wpan_phy *phy, u8 cca_mode);
+ int (*set_cca_ed_level)(struct wpan_phy *phy, int level);
+ int (*set_csma_params)(struct wpan_phy *phy, u8 min_be, u8 max_be,
+ u8 retries);
+ int (*set_frame_retries)(struct wpan_phy *phy, s8 retries);
+
char priv[0] __attribute__((__aligned__(NETDEV_ALIGN)));
};
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index fb5654a..23bfd45 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -118,11 +118,10 @@
struct xfrm_state_walk {
struct list_head all;
u8 state;
- union {
- u8 dying;
- u8 proto;
- };
+ u8 dying;
+ u8 proto;
u32 seq;
+ struct xfrm_filter *filter;
};
/* Full description of state of transformer. */
@@ -594,21 +593,33 @@ struct xfrm_mgr {
const struct xfrm_migrate *m,
int num_bundles,
const struct xfrm_kmaddress *k);
+ bool (*is_alive)(const struct km_event *c);
};
int xfrm_register_km(struct xfrm_mgr *km);
int xfrm_unregister_km(struct xfrm_mgr *km);
+struct xfrm_tunnel_skb_cb {
+ union {
+ struct inet_skb_parm h4;
+ struct inet6_skb_parm h6;
+ } header;
+
+ union {
+ struct ip_tunnel *ip4;
+ struct ip6_tnl *ip6;
+ } tunnel;
+};
+
+#define XFRM_TUNNEL_SKB_CB(__skb) ((struct xfrm_tunnel_skb_cb *)&((__skb)->cb[0]))
+
/*
* This structure is used for the duration where packets are being
* transformed by IPsec. As soon as the packet leaves IPsec the
* area beyond the generic IP part may be overwritten.
*/
struct xfrm_skb_cb {
- union {
- struct inet_skb_parm h4;
- struct inet6_skb_parm h6;
- } header;
+ struct xfrm_tunnel_skb_cb header;
/* Sequence number for replay protection. */
union {
@@ -630,10 +641,7 @@ struct xfrm_skb_cb {
* to transmit header information to the mode input/output functions.
*/
struct xfrm_mode_skb_cb {
- union {
- struct inet_skb_parm h4;
- struct inet6_skb_parm h6;
- } header;
+ struct xfrm_tunnel_skb_cb header;
/* Copied from header for IPv4, always set to zero and DF for IPv6. */
__be16 id;
@@ -665,10 +673,7 @@ struct xfrm_mode_skb_cb {
* related information.
*/
struct xfrm_spi_skb_cb {
- union {
- struct inet_skb_parm h4;
- struct inet6_skb_parm h6;
- } header;
+ struct xfrm_tunnel_skb_cb header;
unsigned int daddroff;
unsigned int family;
@@ -1347,6 +1352,18 @@ struct xfrm_algo_desc {
struct sadb_alg desc;
};
+/* XFRM protocol handlers. */
+struct xfrm4_protocol {
+ int (*handler)(struct sk_buff *skb);
+ int (*input_handler)(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type);
+ int (*cb_handler)(struct sk_buff *skb, int err);
+ int (*err_handler)(struct sk_buff *skb, u32 info);
+
+ struct xfrm4_protocol __rcu *next;
+ int priority;
+};
+
/* XFRM tunnel handlers. */
struct xfrm_tunnel {
int (*handler)(struct sk_buff *skb);
@@ -1405,7 +1422,8 @@ static inline void xfrm_sysctl_fini(struct net *net)
}
#endif
-void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto);
+void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
+ struct xfrm_filter *filter);
int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
int (*func)(struct xfrm_state *, int, void*), void *);
void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net);
@@ -1497,18 +1515,22 @@ int xfrm4_rcv(struct sk_buff *skb);
static inline int xfrm4_rcv_spi(struct sk_buff *skb, int nexthdr, __be32 spi)
{
- return xfrm4_rcv_encap(skb, nexthdr, spi, 0);
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+ XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+ return xfrm_input(skb, nexthdr, spi, 0);
}
int xfrm4_extract_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_prepare_output(struct xfrm_state *x, struct sk_buff *skb);
int xfrm4_output(struct sk_buff *skb);
int xfrm4_output_finish(struct sk_buff *skb);
+int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err);
+int xfrm4_protocol_register(struct xfrm4_protocol *handler, unsigned char protocol);
+int xfrm4_protocol_deregister(struct xfrm4_protocol *handler, unsigned char protocol);
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family);
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family);
void xfrm4_local_error(struct sk_buff *skb, u32 mtu);
-int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
-int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
int xfrm6_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler);
int xfrm6_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler);
int xfrm6_extract_header(struct sk_buff *skb);
@@ -1646,6 +1668,20 @@ static inline int xfrm_aevent_is_on(struct net *net)
rcu_read_unlock();
return ret;
}
+
+static inline int xfrm_acquire_is_on(struct net *net)
+{
+ struct sock *nlsk;
+ int ret = 0;
+
+ rcu_read_lock();
+ nlsk = rcu_dereference(net->xfrm.nlsk);
+ if (nlsk)
+ ret = netlink_has_listeners(nlsk, XFRMNLGRP_ACQUIRE);
+ rcu_read_unlock();
+
+ return ret;
+}
#endif
static inline int aead_len(struct xfrm_algo_aead *alg)
@@ -1748,4 +1784,36 @@ static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
return ret;
}
+static inline int xfrm_rcv_cb(struct sk_buff *skb, unsigned int family,
+ u8 protocol, int err)
+{
+ switch(family) {
+#ifdef CONFIG_INET
+ case AF_INET:
+ return xfrm4_rcv_cb(skb, protocol, err);
+#endif
+ }
+ return 0;
+}
+
+static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
+ unsigned int family)
+{
+ bool tunnel = false;
+
+ switch(family) {
+ case AF_INET:
+ if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4)
+ tunnel = true;
+ break;
+ case AF_INET6:
+ if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6)
+ tunnel = true;
+ break;
+ }
+ if (tunnel && !(x->outer_mode->flags & XFRM_MODE_FLAG_TUNNEL))
+ return -EINVAL;
+
+ return 0;
+}
#endif /* _NET_XFRM_H */
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index e52958d..5d9d1d14 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -8,6 +8,38 @@
* Copyright (c) 2002-2007 Volkswagen Group Electronic Research
* All rights reserved.
*
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Volkswagen nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * Alternatively, provided that this notice is retained in full, this
+ * software may be distributed under the terms of the GNU General
+ * Public License ("GPL") version 2, in which case the provisions of the
+ * GPL apply INSTEAD OF those given above.
+ *
+ * The provided data structures and external interfaces from this code
+ * are not restricted to be used by modules with a GPL compatible license.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
+ * DAMAGE.
*/
#ifndef CAN_H
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 38dbafa..fd161e9 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -16,37 +16,97 @@
#include <linux/types.h>
#include <linux/if_ether.h>
-/* This should work for both 32 and 64 bit userland. */
+/* All structures exposed to userland should be defined such that they
+ * have the same layout for 32-bit and 64-bit userland.
+ */
+
+/**
+ * struct ethtool_cmd - link control and status
+ * @cmd: Command number = %ETHTOOL_GSET or %ETHTOOL_SSET
+ * @supported: Bitmask of %SUPPORTED_* flags for the link modes,
+ * physical connectors and other link features for which the
+ * interface supports autonegotiation or auto-detection.
+ * Read-only.
+ * @advertising: Bitmask of %ADVERTISED_* flags for the link modes,
+ * physical connectors and other link features that are
+ * advertised through autonegotiation or enabled for
+ * auto-detection.
+ * @speed: Low bits of the speed
+ * @duplex: Duplex mode; one of %DUPLEX_*
+ * @port: Physical connector type; one of %PORT_*
+ * @phy_address: MDIO address of PHY (transceiver); 0 or 255 if not
+ * applicable. For clause 45 PHYs this is the PRTAD.
+ * @transceiver: Historically used to distinguish different possible
+ * PHY types, but not in a consistent way. Deprecated.
+ * @autoneg: Enable/disable autonegotiation and auto-detection;
+ * either %AUTONEG_DISABLE or %AUTONEG_ENABLE
+ * @mdio_support: Bitmask of %ETH_MDIO_SUPPORTS_* flags for the MDIO
+ * protocols supported by the interface; 0 if unknown.
+ * Read-only.
+ * @maxtxpkt: Historically used to report TX IRQ coalescing; now
+ * obsoleted by &struct ethtool_coalesce. Read-only; deprecated.
+ * @maxrxpkt: Historically used to report RX IRQ coalescing; now
+ * obsoleted by &struct ethtool_coalesce. Read-only; deprecated.
+ * @speed_hi: High bits of the speed
+ * @eth_tp_mdix: Ethernet twisted-pair MDI(-X) status; one of
+ * %ETH_TP_MDI_*. If the status is unknown or not applicable, the
+ * value will be %ETH_TP_MDI_INVALID. Read-only.
+ * @eth_tp_mdix_ctrl: Ethernet twisted pair MDI(-X) control; one of
+ * %ETH_TP_MDI_*. If MDI(-X) control is not implemented, reads
+ * yield %ETH_TP_MDI_INVALID and writes may be ignored or rejected.
+ * When written successfully, the link should be renegotiated if
+ * necessary.
+ * @lp_advertising: Bitmask of %ADVERTISED_* flags for the link modes
+ * and other link features that the link partner advertised
+ * through autonegotiation; 0 if unknown or not applicable.
+ * Read-only.
+ *
+ * The link speed in Mbps is split between @speed and @speed_hi. Use
+ * the ethtool_cmd_speed() and ethtool_cmd_speed_set() functions to
+ * access it.
+ *
+ * If autonegotiation is disabled, the speed and @duplex represent the
+ * fixed link mode and are writable if the driver supports multiple
+ * link modes. If it is enabled then they are read-only; if the link
+ * is up they represent the negotiated link mode; if the link is down,
+ * the speed is 0, %SPEED_UNKNOWN or the highest enabled speed and
+ * @duplex is %DUPLEX_UNKNOWN or the best enabled duplex mode.
+ *
+ * Some hardware interfaces may have multiple PHYs and/or physical
+ * connectors fitted or do not allow the driver to detect which are
+ * fitted. For these interfaces @port and/or @phy_address may be
+ * writable, possibly dependent on @autoneg being %AUTONEG_DISABLE.
+ * Otherwise, attempts to write different values may be ignored or
+ * rejected.
+ *
+ * Users should assume that all fields not marked read-only are
+ * writable and subject to validation by the driver. They should use
+ * %ETHTOOL_GSET to get the current values before making specific
+ * changes and then applying them with %ETHTOOL_SSET.
+ *
+ * Drivers that implement set_settings() should validate all fields
+ * other than @cmd that are not described as read-only or deprecated,
+ * and must ignore all fields described as read-only.
+ *
+ * Deprecated fields should be ignored by both users and drivers.
+ */
struct ethtool_cmd {
__u32 cmd;
- __u32 supported; /* Features this interface supports */
- __u32 advertising; /* Features this interface advertises */
- __u16 speed; /* The forced speed (lower bits) in
- * Mbps. Please use
- * ethtool_cmd_speed()/_set() to
- * access it */
- __u8 duplex; /* Duplex, half or full */
- __u8 port; /* Which connector port */
- __u8 phy_address; /* MDIO PHY address (PRTAD for clause 45).
- * May be read-only or read-write
- * depending on the driver.
- */
- __u8 transceiver; /* Which transceiver to use */
- __u8 autoneg; /* Enable or disable autonegotiation */
- __u8 mdio_support; /* MDIO protocols supported. Read-only.
- * Not set by all drivers.
- */
- __u32 maxtxpkt; /* Tx pkts before generating tx int */
- __u32 maxrxpkt; /* Rx pkts before generating rx int */
- __u16 speed_hi; /* The forced speed (upper
- * bits) in Mbps. Please use
- * ethtool_cmd_speed()/_set() to
- * access it */
- __u8 eth_tp_mdix; /* twisted pair MDI-X status */
- __u8 eth_tp_mdix_ctrl; /* twisted pair MDI-X control, when set,
- * link should be renegotiated if necessary
- */
- __u32 lp_advertising; /* Features the link partner advertises */
+ __u32 supported;
+ __u32 advertising;
+ __u16 speed;
+ __u8 duplex;
+ __u8 port;
+ __u8 phy_address;
+ __u8 transceiver;
+ __u8 autoneg;
+ __u8 mdio_support;
+ __u32 maxtxpkt;
+ __u32 maxrxpkt;
+ __u16 speed_hi;
+ __u8 eth_tp_mdix;
+ __u8 eth_tp_mdix_ctrl;
+ __u32 lp_advertising;
__u32 reserved[2];
};
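
For reference, a minimal userspace sketch (not part of this patch) of the %ETHTOOL_GSET flow documented above. The ethtool_ioctl() wrapper is a hypothetical helper, reused by the later sketches in this file; error handling is illustrative only.

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

/* Hypothetical helper: issue one SIOCETHTOOL request for @ifname. */
static int ethtool_ioctl(int sock, const char *ifname, void *cmd)
{
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
        ifr.ifr_data = cmd;

        return ioctl(sock, SIOCETHTOOL, &ifr);
}

/* Read the current link speed in Mbps; it is split across @speed/@speed_hi. */
static int get_link_speed(int sock, const char *ifname, __u32 *speed_mbps)
{
        struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };

        if (ethtool_ioctl(sock, ifname, &ecmd) < 0)
                return -1;

        *speed_mbps = ethtool_cmd_speed(&ecmd); /* combines speed and speed_hi */
        return 0;
}

A caller changing settings would fetch with %ETHTOOL_GSET, modify the writable fields, and write the whole structure back with %ETHTOOL_SSET, as described above.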
@@ -79,37 +139,68 @@ static inline __u32 ethtool_cmd_speed(const struct ethtool_cmd *ep)
#define ETHTOOL_FWVERS_LEN 32
#define ETHTOOL_BUSINFO_LEN 32
-/* these strings are set to whatever the driver author decides... */
+
+/**
+ * struct ethtool_drvinfo - general driver and device information
+ * @cmd: Command number = %ETHTOOL_GDRVINFO
+ * @driver: Driver short name. This should normally match the name
+ * in its bus driver structure (e.g. pci_driver::name). Must
+ * not be an empty string.
+ * @version: Driver version string; may be an empty string
+ * @fw_version: Firmware version string; may be an empty string
+ * @bus_info: Device bus address. This should match the dev_name()
+ * string for the underlying bus device, if there is one. May be
+ * an empty string.
+ * @n_priv_flags: Number of flags valid for %ETHTOOL_GPFLAGS and
+ * %ETHTOOL_SPFLAGS commands; also the number of strings in the
+ * %ETH_SS_PRIV_FLAGS set
+ * @n_stats: Number of u64 statistics returned by the %ETHTOOL_GSTATS
+ * command; also the number of strings in the %ETH_SS_STATS set
+ * @testinfo_len: Number of results returned by the %ETHTOOL_TEST
+ * command; also the number of strings in the %ETH_SS_TEST set
+ * @eedump_len: Size of EEPROM accessible through the %ETHTOOL_GEEPROM
+ * and %ETHTOOL_SEEPROM commands, in bytes
+ * @regdump_len: Size of register dump returned by the %ETHTOOL_GREGS
+ * command, in bytes
+ *
+ * Users can use the %ETHTOOL_GSSET_INFO command to get the number of
+ * strings in any string set (from Linux 2.6.34).
+ *
+ * Drivers should set at most @driver, @version, @fw_version and
+ * @bus_info in their get_drvinfo() implementation. The ethtool
+ * core fills in the other fields using other driver operations.
+ */
struct ethtool_drvinfo {
__u32 cmd;
- char driver[32]; /* driver short name, "tulip", "eepro100" */
- char version[32]; /* driver version string */
- char fw_version[ETHTOOL_FWVERS_LEN]; /* firmware version string */
- char bus_info[ETHTOOL_BUSINFO_LEN]; /* Bus info for this IF. */
- /* For PCI devices, use pci_name(pci_dev). */
+ char driver[32];
+ char version[32];
+ char fw_version[ETHTOOL_FWVERS_LEN];
+ char bus_info[ETHTOOL_BUSINFO_LEN];
char reserved1[32];
char reserved2[12];
- /*
- * Some struct members below are filled in
- * using ops->get_sset_count(). Obtaining
- * this info from ethtool_drvinfo is now
- * deprecated; Use ETHTOOL_GSSET_INFO
- * instead.
- */
- __u32 n_priv_flags; /* number of flags valid in ETHTOOL_GPFLAGS */
- __u32 n_stats; /* number of u64's from ETHTOOL_GSTATS */
+ __u32 n_priv_flags;
+ __u32 n_stats;
__u32 testinfo_len;
- __u32 eedump_len; /* Size of data from ETHTOOL_GEEPROM (bytes) */
- __u32 regdump_len; /* Size of data from ETHTOOL_GREGS (bytes) */
+ __u32 eedump_len;
+ __u32 regdump_len;
};
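
A hedged driver-side sketch of the convention above: only the four string fields are filled in, everything else is left to the ethtool core. The driver name, version string and the foo_priv layout are invented for illustration.

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

struct foo_priv {                               /* hypothetical private data */
        struct pci_dev *pdev;
        char fw_ver_str[ETHTOOL_FWVERS_LEN];
};

static void foo_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        struct foo_priv *priv = netdev_priv(dev);

        strlcpy(info->driver, "foo", sizeof(info->driver));
        strlcpy(info->version, "1.0", sizeof(info->version));
        strlcpy(info->fw_version, priv->fw_ver_str, sizeof(info->fw_version));
        strlcpy(info->bus_info, pci_name(priv->pdev), sizeof(info->bus_info));
        /* n_stats, testinfo_len, eedump_len, regdump_len are filled by the core */
}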
#define SOPASS_MAX 6
-/* wake-on-lan settings */
+
+/**
+ * struct ethtool_wolinfo - Wake-On-Lan configuration
+ * @cmd: Command number = %ETHTOOL_GWOL or %ETHTOOL_SWOL
+ * @supported: Bitmask of %WAKE_* flags for supported Wake-On-Lan modes.
+ * Read-only.
+ * @wolopts: Bitmask of %WAKE_* flags for enabled Wake-On-Lan modes.
+ * @sopass: SecureOn(tm) password; meaningful only if %WAKE_MAGICSECURE
+ * is set in @wolopts.
+ */
struct ethtool_wolinfo {
__u32 cmd;
__u32 supported;
__u32 wolopts;
- __u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */
+ __u8 sopass[SOPASS_MAX];
};
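
A short userspace sketch of the @supported/@wolopts split, assuming the hypothetical ethtool_ioctl() helper from the %ETHTOOL_GSET sketch earlier and the same includes.

/* Enable magic-packet wake-up only if the device advertises WAKE_MAGIC. */
static int enable_wol_magic(int sock, const char *ifname)
{
        struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };

        if (ethtool_ioctl(sock, ifname, &wol) < 0)
                return -1;
        if (!(wol.supported & WAKE_MAGIC))
                return -1;                      /* not supported by the device */

        wol.cmd = ETHTOOL_SWOL;
        wol.wolopts |= WAKE_MAGIC;
        return ethtool_ioctl(sock, ifname, &wol);
}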
/* for passing single values */
@@ -118,20 +209,51 @@ struct ethtool_value {
__u32 data;
};
-/* for passing big chunks of data */
+/**
+ * struct ethtool_regs - hardware register dump
+ * @cmd: Command number = %ETHTOOL_GREGS
+ * @version: Dump format version. This is driver-specific and may
+ * distinguish different chips/revisions. Drivers must use new
+ * version numbers whenever the dump format changes in an
+ * incompatible way.
+ * @len: On entry, the real length of @data. On return, the number of
+ * bytes used.
+ * @data: Buffer for the register dump
+ *
+ * Users should use %ETHTOOL_GDRVINFO to find the maximum length of
+ * a register dump for the interface. They must allocate the buffer
+ * immediately following this structure.
+ */
struct ethtool_regs {
__u32 cmd;
- __u32 version; /* driver-specific, indicates different chips/revs */
- __u32 len; /* bytes */
+ __u32 version;
+ __u32 len;
__u8 data[0];
};
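
A sketch of the buffer rule above: the dump buffer follows the header directly, sized from @regdump_len of %ETHTOOL_GDRVINFO. It again assumes the hypothetical ethtool_ioctl() helper.

#include <stdlib.h>

/* Fetch a register dump; @regdump_len comes from ETHTOOL_GDRVINFO. */
static struct ethtool_regs *get_regs(int sock, const char *ifname,
                                     __u32 regdump_len)
{
        struct ethtool_regs *regs;

        regs = calloc(1, sizeof(*regs) + regdump_len);  /* header + data[] */
        if (!regs)
                return NULL;

        regs->cmd = ETHTOOL_GREGS;
        regs->len = regdump_len;                /* real length of data[] */

        if (ethtool_ioctl(sock, ifname, regs) < 0) {
                free(regs);
                return NULL;
        }
        return regs;    /* regs->len now holds the number of bytes used */
}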
-/* for passing EEPROM chunks */
+/**
+ * struct ethtool_eeprom - EEPROM dump
+ * @cmd: Command number = %ETHTOOL_GEEPROM, %ETHTOOL_GMODULEEEPROM or
+ * %ETHTOOL_SEEPROM
+ * @magic: A 'magic cookie' value to guard against accidental changes.
+ * The value passed in to %ETHTOOL_SEEPROM must match the value
+ * returned by %ETHTOOL_GEEPROM for the same device. This is
+ * unused when @cmd is %ETHTOOL_GMODULEEEPROM.
+ * @offset: Offset within the EEPROM to begin reading/writing, in bytes
+ * @len: On entry, number of bytes to read/write. On successful
+ * return, number of bytes actually read/written. In case of
+ * error, this may indicate at what point the error occurred.
+ * @data: Buffer to read/write from
+ *
+ * Users may use %ETHTOOL_GDRVINFO or %ETHTOOL_GMODULEINFO to find
+ * the length of an on-board or module EEPROM, respectively. They
+ * must allocate the buffer immediately following this structure.
+ */
struct ethtool_eeprom {
__u32 cmd;
__u32 magic;
- __u32 offset; /* in bytes */
- __u32 len; /* in bytes */
+ __u32 offset;
+ __u32 len;
__u8 data[0];
};
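
A sketch of how the magic cookie is intended to be carried from %ETHTOOL_GEEPROM to %ETHTOOL_SEEPROM; the single-byte rewrite and the ethtool_ioctl() helper are illustrative assumptions.

/* Rewrite one EEPROM byte, reusing the magic returned by ETHTOOL_GEEPROM. */
static int eeprom_write_byte(int sock, const char *ifname,
                             __u32 offset, __u8 value)
{
        unsigned char buf[sizeof(struct ethtool_eeprom) + 1] = { 0 };
        struct ethtool_eeprom *e = (struct ethtool_eeprom *)buf;

        e->cmd = ETHTOOL_GEEPROM;
        e->offset = offset;
        e->len = 1;
        if (ethtool_ioctl(sock, ifname, e) < 0)
                return -1;

        e->cmd = ETHTOOL_SEEPROM;       /* e->magic keeps the returned cookie */
        e->data[0] = value;
        return ethtool_ioctl(sock, ifname, e);
}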
@@ -229,17 +351,18 @@ struct ethtool_modinfo {
* @rate_sample_interval: How often to do adaptive coalescing packet rate
* sampling, measured in seconds. Must not be zero.
*
- * Each pair of (usecs, max_frames) fields specifies this exit
- * condition for interrupt coalescing:
+ * Each pair of (usecs, max_frames) fields specifies that interrupts
+ * should be coalesced until
* (usecs > 0 && time_since_first_completion >= usecs) ||
* (max_frames > 0 && completed_frames >= max_frames)
+ *
* It is illegal to set both usecs and max_frames to zero as this
* would cause interrupts to never be generated. To disable
* coalescing, set usecs = 0 and max_frames = 1.
*
* Some implementations ignore the value of max_frames and use the
- * condition:
- * time_since_first_completion >= usecs
+ * condition time_since_first_completion >= usecs
+ *
* This is deprecated. Drivers for hardware that does not support
* counting completions should validate that max_frames == !rx_usecs.
*
@@ -279,22 +402,37 @@ struct ethtool_coalesce {
__u32 rate_sample_interval;
};
-/* for configuring RX/TX ring parameters */
+/**
+ * struct ethtool_ringparam - RX/TX ring parameters
+ * @cmd: Command number = %ETHTOOL_GRINGPARAM or %ETHTOOL_SRINGPARAM
+ * @rx_max_pending: Maximum supported number of pending entries per
+ * RX ring. Read-only.
+ * @rx_mini_max_pending: Maximum supported number of pending entries
+ * per RX mini ring. Read-only.
+ * @rx_jumbo_max_pending: Maximum supported number of pending entries
+ * per RX jumbo ring. Read-only.
+ * @tx_max_pending: Maximum supported number of pending entries per
+ * TX ring. Read-only.
+ * @rx_pending: Current maximum number of pending entries per RX ring
+ * @rx_mini_pending: Current maximum number of pending entries per RX
+ * mini ring
+ * @rx_jumbo_pending: Current maximum number of pending entries per RX
+ * jumbo ring
+ * @tx_pending: Current maximum supported number of pending entries
+ * per TX ring
+ *
+ * If the interface does not have separate RX mini and/or jumbo rings,
+ * @rx_mini_max_pending and/or @rx_jumbo_max_pending will be 0.
+ *
+ * There may also be driver-dependent minimum values for the number
+ * of entries per ring.
+ */
struct ethtool_ringparam {
- __u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */
-
- /* Read only attributes. These indicate the maximum number
- * of pending RX/TX ring entries the driver will allow the
- * user to set.
- */
+ __u32 cmd;
__u32 rx_max_pending;
__u32 rx_mini_max_pending;
__u32 rx_jumbo_max_pending;
__u32 tx_max_pending;
-
- /* Values changeable by the user. The valid values are
- * in the range 1 to the "*_max_pending" counterpart above.
- */
__u32 rx_pending;
__u32 rx_mini_pending;
__u32 rx_jumbo_pending;
@@ -329,51 +467,96 @@ struct ethtool_channels {
__u32 combined_count;
};
-/* for configuring link flow control parameters */
+/**
+ * struct ethtool_pauseparam - Ethernet pause (flow control) parameters
+ * @cmd: Command number = %ETHTOOL_GPAUSEPARAM or %ETHTOOL_SPAUSEPARAM
+ * @autoneg: Flag to enable autonegotiation of pause frame use
+ * @rx_pause: Flag to enable reception of pause frames
+ * @tx_pause: Flag to enable transmission of pause frames
+ *
+ * Drivers should reject a non-zero setting of @autoneg when
+ * autonegotiation is disabled (or not supported) for the link.
+ *
+ * If the link is autonegotiated, drivers should use
+ * mii_advertise_flowctrl() or similar code to set the advertised
+ * pause frame capabilities based on the @rx_pause and @tx_pause flags,
+ * even if @autoneg is zero. They should also allow the advertised
+ * pause frame capabilities to be controlled directly through the
+ * advertising field of &struct ethtool_cmd.
+ *
+ * If @autoneg is non-zero, the MAC is configured to send and/or
+ * receive pause frames according to the result of autonegotiation.
+ * Otherwise, it is configured directly based on the @rx_pause and
+ * @tx_pause flags.
+ */
struct ethtool_pauseparam {
- __u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */
-
- /* If the link is being auto-negotiated (via ethtool_cmd.autoneg
- * being true) the user may set 'autoneg' here non-zero to have the
- * pause parameters be auto-negotiated too. In such a case, the
- * {rx,tx}_pause values below determine what capabilities are
- * advertised.
- *
- * If 'autoneg' is zero or the link is not being auto-negotiated,
- * then {rx,tx}_pause force the driver to use/not-use pause
- * flow control.
- */
+ __u32 cmd;
__u32 autoneg;
__u32 rx_pause;
__u32 tx_pause;
};
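
A hedged driver-side sketch of the rules above, using mii_advertise_flowctrl() as suggested; foo_priv and its fields are invented, and a real driver would go on to reprogram the PHY/MAC.

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/netdevice.h>

struct foo_priv {                       /* hypothetical private data */
        bool autoneg;                   /* link autonegotiation enabled? */
        u16 flowctrl_adv;               /* ADVERTISE_PAUSE_* bits to program */
};

static int foo_set_pauseparam(struct net_device *dev,
                              struct ethtool_pauseparam *pause)
{
        struct foo_priv *priv = netdev_priv(dev);
        int cap = 0;

        /* Reject pause autonegotiation when link autonegotiation is off. */
        if (pause->autoneg && !priv->autoneg)
                return -EINVAL;

        if (pause->rx_pause)
                cap |= FLOW_CTRL_RX;
        if (pause->tx_pause)
                cap |= FLOW_CTRL_TX;

        /* Advertise the requested capabilities even if @autoneg is zero. */
        priv->flowctrl_adv = mii_advertise_flowctrl(cap);

        return 0;
}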
#define ETH_GSTRING_LEN 32
+
+/**
+ * enum ethtool_stringset - string set ID
+ * @ETH_SS_TEST: Self-test result names, for use with %ETHTOOL_TEST
+ * @ETH_SS_STATS: Statistic names, for use with %ETHTOOL_GSTATS
+ * @ETH_SS_PRIV_FLAGS: Driver private flag names, for use with
+ * %ETHTOOL_GPFLAGS and %ETHTOOL_SPFLAGS
+ * @ETH_SS_NTUPLE_FILTERS: Previously used with %ETHTOOL_GRXNTUPLE;
+ * now deprecated
+ * @ETH_SS_FEATURES: Device feature names
+ */
enum ethtool_stringset {
ETH_SS_TEST = 0,
ETH_SS_STATS,
ETH_SS_PRIV_FLAGS,
- ETH_SS_NTUPLE_FILTERS, /* Do not use, GRXNTUPLE is now deprecated */
+ ETH_SS_NTUPLE_FILTERS,
ETH_SS_FEATURES,
};
-/* for passing string sets for data tagging */
+/**
+ * struct ethtool_gstrings - string set for data tagging
+ * @cmd: Command number = %ETHTOOL_GSTRINGS
+ * @string_set: String set ID; one of &enum ethtool_stringset
+ * @len: On return, the number of strings in the string set
+ * @data: Buffer for strings. Each string is null-padded to a size of
+ * %ETH_GSTRING_LEN.
+ *
+ * Users must use %ETHTOOL_GSSET_INFO to find the number of strings in
+ * the string set. They must allocate a buffer of the appropriate
+ * size immediately following this structure.
+ */
struct ethtool_gstrings {
- __u32 cmd; /* ETHTOOL_GSTRINGS */
- __u32 string_set; /* string set id e.c. ETH_SS_TEST, etc*/
- __u32 len; /* number of strings in the string set */
+ __u32 cmd;
+ __u32 string_set;
+ __u32 len;
__u8 data[0];
};
+/**
+ * struct ethtool_sset_info - string set information
+ * @cmd: Command number = %ETHTOOL_GSSET_INFO
+ * @sset_mask: On entry, a bitmask of string sets to query, with bits
+ * numbered according to &enum ethtool_stringset. On return, a
+ * bitmask of those string sets queried that are supported.
+ * @data: Buffer for string set sizes. On return, this contains the
+ * size of each string set that was queried and supported, in
+ * order of ID.
+ *
+ * Example: The user passes in @sset_mask = 0x7 (sets 0, 1, 2) and on
+ * return @sset_mask == 0x6 (sets 1, 2). Then @data[0] contains the
+ * size of set 1 and @data[1] contains the size of set 2.
+ *
+ * Users must allocate a buffer of the appropriate size (4 * number of
+ * sets queried) immediately following this structure.
+ */
struct ethtool_sset_info {
- __u32 cmd; /* ETHTOOL_GSSET_INFO */
+ __u32 cmd;
__u32 reserved;
- __u64 sset_mask; /* input: each bit selects an sset to query */
- /* output: each bit a returned sset */
- __u32 data[0]; /* ETH_SS_xxx count, in order, based on bits
- in sset_mask. One bit implies one
- __u32, two bits implies two
- __u32's, etc. */
+ __u64 sset_mask;
+ __u32 data[0];
};
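
The worked example above, expressed as a small helper: walk the returned @sset_mask and pick the size of one set out of @data, which holds one __u32 per supported set in bit order.

/* Return the size of @set_id from a completed ETHTOOL_GSSET_INFO reply,
 * or -1 if that set was not reported as supported. */
static int sset_size(const struct ethtool_sset_info *info, int set_id)
{
        unsigned int idx = 0;
        int i;

        if (!(info->sset_mask & (1ULL << set_id)))
                return -1;

        for (i = 0; i < set_id; i++)
                if (info->sset_mask & (1ULL << i))
                        idx++;

        return info->data[idx];
}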
/**
@@ -393,24 +576,58 @@ enum ethtool_test_flags {
ETH_TEST_FL_EXTERNAL_LB_DONE = (1 << 3),
};
-/* for requesting NIC test and getting results*/
+/**
+ * struct ethtool_test - device self-test invocation
+ * @cmd: Command number = %ETHTOOL_TEST
+ * @flags: A bitmask of flags from &enum ethtool_test_flags. Some
+ * flags may be set by the user on entry; others may be set by
+ * the driver on return.
+ * @len: On return, the number of test results
+ * @data: Array of test results
+ *
+ * Users must use %ETHTOOL_GSSET_INFO or %ETHTOOL_GDRVINFO to find the
+ * number of test results that will be returned. They must allocate a
+ * buffer of the appropriate size (8 * number of results) immediately
+ * following this structure.
+ */
struct ethtool_test {
- __u32 cmd; /* ETHTOOL_TEST */
- __u32 flags; /* ETH_TEST_FL_xxx */
+ __u32 cmd;
+ __u32 flags;
__u32 reserved;
- __u32 len; /* result length, in number of u64 elements */
+ __u32 len;
__u64 data[0];
};
-/* for dumping NIC-specific statistics */
+/**
+ * struct ethtool_stats - device-specific statistics
+ * @cmd: Command number = %ETHTOOL_GSTATS
+ * @n_stats: On return, the number of statistics
+ * @data: Array of statistics
+ *
+ * Users must use %ETHTOOL_GSSET_INFO or %ETHTOOL_GDRVINFO to find the
+ * number of statistics that will be returned. They must allocate a
+ * buffer of the appropriate size (8 * number of statistics)
+ * immediately following this structure.
+ */
struct ethtool_stats {
- __u32 cmd; /* ETHTOOL_GSTATS */
- __u32 n_stats; /* number of u64's being returned */
+ __u32 cmd;
+ __u32 n_stats;
__u64 data[0];
};
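
A sketch tying the two buffers together: statistic names from %ETHTOOL_GSTRINGS (set %ETH_SS_STATS) and values from %ETHTOOL_GSTATS are paired by index. It assumes @n_stats was obtained as described above and the hypothetical ethtool_ioctl() helper.

#include <stdio.h>
#include <stdlib.h>

/* Print "name: value" for every device statistic. */
static int dump_stats(int sock, const char *ifname, __u32 n_stats)
{
        struct ethtool_gstrings *names;
        struct ethtool_stats *stats;
        __u32 i;

        names = calloc(1, sizeof(*names) + n_stats * ETH_GSTRING_LEN);
        stats = calloc(1, sizeof(*stats) + n_stats * sizeof(__u64));
        if (!names || !stats)
                goto err;

        names->cmd = ETHTOOL_GSTRINGS;
        names->string_set = ETH_SS_STATS;
        stats->cmd = ETHTOOL_GSTATS;

        if (ethtool_ioctl(sock, ifname, names) < 0 ||
            ethtool_ioctl(sock, ifname, stats) < 0)
                goto err;

        for (i = 0; i < stats->n_stats; i++)
                printf("%.*s: %llu\n", ETH_GSTRING_LEN,
                       (const char *)names->data + i * ETH_GSTRING_LEN,
                       (unsigned long long)stats->data[i]);

        free(names);
        free(stats);
        return 0;
err:
        free(names);
        free(stats);
        return -1;
}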
+/**
+ * struct ethtool_perm_addr - permanent hardware address
+ * @cmd: Command number = %ETHTOOL_GPERMADDR
+ * @size: On entry, the size of the buffer. On return, the size of the
+ * address. The command fails if the buffer is too small.
+ * @data: Buffer for the address
+ *
+ * Users must allocate the buffer immediately following this structure.
+ * A buffer size of %MAX_ADDR_LEN should be sufficient for any address
+ * type.
+ */
struct ethtool_perm_addr {
- __u32 cmd; /* ETHTOOL_GPERMADDR */
+ __u32 cmd;
__u32 size;
__u8 data[0];
};
@@ -593,7 +810,7 @@ struct ethtool_rx_flow_spec {
* %ETHTOOL_SRXCLSRLINS may add the rule at any suitable unused
* location, and may remove a rule at a later location (lower
* priority) that matches exactly the same set of flows. The special
- * values are: %RX_CLS_LOC_ANY, selecting any location;
+ * values are %RX_CLS_LOC_ANY, selecting any location;
* %RX_CLS_LOC_FIRST, selecting the first suitable location (maximum
* priority); and %RX_CLS_LOC_LAST, selecting the last suitable
* location (minimum priority). Additional special values may be
@@ -704,9 +921,6 @@ struct ethtool_flash {
* for %ETHTOOL_GET_DUMP_FLAG command
* @data: data collected for get dump data operation
*/
-
-#define ETH_FW_DUMP_DISABLE 0
-
struct ethtool_dump {
__u32 cmd;
__u32 version;
@@ -715,6 +929,8 @@ struct ethtool_dump {
__u8 data[0];
};
+#define ETH_FW_DUMP_DISABLE 0
+
/* for returning and changing feature sets */
/**
@@ -734,8 +950,9 @@ struct ethtool_get_features_block {
/**
* struct ethtool_gfeatures - command to get state of device's features
* @cmd: command number = %ETHTOOL_GFEATURES
- * @size: in: number of elements in the features[] array;
- * out: number of elements in features[] needed to hold all features
+ * @size: On entry, the number of elements in the features[] array;
+ * on return, the number of elements in features[] needed to hold
+ * all features
* @features: state of features
*/
struct ethtool_gfeatures {
@@ -905,7 +1122,6 @@ enum ethtool_sfeatures_retval_bits {
#define SPARC_ETH_GSET ETHTOOL_GSET
#define SPARC_ETH_SSET ETHTOOL_SSET
-/* Indicates what features are supported by the interface. */
#define SUPPORTED_10baseT_Half (1 << 0)
#define SUPPORTED_10baseT_Full (1 << 1)
#define SUPPORTED_100baseT_Half (1 << 2)
@@ -934,7 +1150,6 @@ enum ethtool_sfeatures_retval_bits {
#define SUPPORTED_40000baseSR4_Full (1 << 25)
#define SUPPORTED_40000baseLR4_Full (1 << 26)
-/* Indicates what features are advertised by the interface. */
#define ADVERTISED_10baseT_Half (1 << 0)
#define ADVERTISED_10baseT_Full (1 << 1)
#define ADVERTISED_100baseT_Half (1 << 2)
@@ -999,9 +1214,7 @@ enum ethtool_sfeatures_retval_bits {
#define XCVR_DUMMY2 0x03
#define XCVR_DUMMY3 0x04
-/* Enable or disable autonegotiation. If this is set to enable,
- * the forced link modes above are completely ignored.
- */
+/* Enable or disable autonegotiation. */
#define AUTONEG_DISABLE 0x00
#define AUTONEG_ENABLE 0x01
diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
index d758163..9cf2394 100644
--- a/include/uapi/linux/if.h
+++ b/include/uapi/linux/if.h
@@ -27,65 +27,91 @@
#define IFALIASZ 256
#include <linux/hdlc/ioctl.h>
-/* Standard interface flags (netdevice->flags). */
-#define IFF_UP 0x1 /* interface is up */
-#define IFF_BROADCAST 0x2 /* broadcast address valid */
-#define IFF_DEBUG 0x4 /* turn on debugging */
-#define IFF_LOOPBACK 0x8 /* is a loopback net */
-#define IFF_POINTOPOINT 0x10 /* interface is has p-p link */
-#define IFF_NOTRAILERS 0x20 /* avoid use of trailers */
-#define IFF_RUNNING 0x40 /* interface RFC2863 OPER_UP */
-#define IFF_NOARP 0x80 /* no ARP protocol */
-#define IFF_PROMISC 0x100 /* receive all packets */
-#define IFF_ALLMULTI 0x200 /* receive all multicast packets*/
-
-#define IFF_MASTER 0x400 /* master of a load balancer */
-#define IFF_SLAVE 0x800 /* slave of a load balancer */
-
-#define IFF_MULTICAST 0x1000 /* Supports multicast */
-
-#define IFF_PORTSEL 0x2000 /* can set media type */
-#define IFF_AUTOMEDIA 0x4000 /* auto media select active */
-#define IFF_DYNAMIC 0x8000 /* dialup device with changing addresses*/
-
-#define IFF_LOWER_UP 0x10000 /* driver signals L1 up */
-#define IFF_DORMANT 0x20000 /* driver signals dormant */
+/**
+ * enum net_device_flags - &struct net_device flags
+ *
+ * These are the &struct net_device flags, they can be set by drivers, the
+ * kernel and some can be triggered by userspace. Userspace can query and
+ * set these flags using userspace utilities but there is also a sysfs
+ * entry available for all dev flags which can be queried and set. These flags
+ * are shared for all types of net_devices. The sysfs entries are available
+ * via /sys/class/net/<dev>/flags. Flags which can be toggled through sysfs
+ * are annotated below; note that only a few flags can be toggled and some
+ * other flags are always preserved from the original net_device flags
+ * even if you try to set them via sysfs. Flags which are always preserved
+ * are kept under the flag grouping @IFF_VOLATILE. Flags which are volatile
+ * are annotated below as such.
+ *
+ * You should have a pretty good reason to be extending these flags.
+ *
+ * @IFF_UP: interface is up. Can be toggled through sysfs.
+ * @IFF_BROADCAST: broadcast address valid. Volatile.
+ * @IFF_DEBUG: turn on debugging. Can be toggled through sysfs.
+ * @IFF_LOOPBACK: is a loopback net. Volatile.
+ * @IFF_POINTOPOINT: interface has a p-p link. Volatile.
+ * @IFF_NOTRAILERS: avoid use of trailers. Can be toggled through sysfs.
+ * Volatile.
+ * @IFF_RUNNING: interface RFC2863 OPER_UP. Volatile.
+ * @IFF_NOARP: no ARP protocol. Can be toggled through sysfs. Volatile.
+ * @IFF_PROMISC: receive all packets. Can be toggled through sysfs.
+ * @IFF_ALLMULTI: receive all multicast packets. Can be toggled through
+ * sysfs.
+ * @IFF_MASTER: master of a load balancer. Volatile.
+ * @IFF_SLAVE: slave of a load balancer. Volatile.
+ * @IFF_MULTICAST: Supports multicast. Can be toggled through sysfs.
+ * @IFF_PORTSEL: can set media type. Can be toggled through sysfs.
+ * @IFF_AUTOMEDIA: auto media select active. Can be toggled through sysfs.
+ * @IFF_DYNAMIC: dialup device with changing addresses. Can be toggled
+ * through sysfs.
+ * @IFF_LOWER_UP: driver signals L1 up. Volatile.
+ * @IFF_DORMANT: driver signals dormant. Volatile.
+ * @IFF_ECHO: echo sent packets. Volatile.
+ */
+enum net_device_flags {
+ IFF_UP = 1<<0, /* sysfs */
+ IFF_BROADCAST = 1<<1, /* volatile */
+ IFF_DEBUG = 1<<2, /* sysfs */
+ IFF_LOOPBACK = 1<<3, /* volatile */
+ IFF_POINTOPOINT = 1<<4, /* volatile */
+ IFF_NOTRAILERS = 1<<5, /* sysfs */
+ IFF_RUNNING = 1<<6, /* volatile */
+ IFF_NOARP = 1<<7, /* sysfs */
+ IFF_PROMISC = 1<<8, /* sysfs */
+ IFF_ALLMULTI = 1<<9, /* sysfs */
+ IFF_MASTER = 1<<10, /* volatile */
+ IFF_SLAVE = 1<<11, /* volatile */
+ IFF_MULTICAST = 1<<12, /* sysfs */
+ IFF_PORTSEL = 1<<13, /* sysfs */
+ IFF_AUTOMEDIA = 1<<14, /* sysfs */
+ IFF_DYNAMIC = 1<<15, /* sysfs */
+ IFF_LOWER_UP = 1<<16, /* volatile */
+ IFF_DORMANT = 1<<17, /* volatile */
+ IFF_ECHO = 1<<18, /* volatile */
+};
-#define IFF_ECHO 0x40000 /* echo sent packets */
+#define IFF_UP IFF_UP
+#define IFF_BROADCAST IFF_BROADCAST
+#define IFF_DEBUG IFF_DEBUG
+#define IFF_LOOPBACK IFF_LOOPBACK
+#define IFF_POINTOPOINT IFF_POINTOPOINT
+#define IFF_NOTRAILERS IFF_NOTRAILERS
+#define IFF_RUNNING IFF_RUNNING
+#define IFF_NOARP IFF_NOARP
+#define IFF_PROMISC IFF_PROMISC
+#define IFF_ALLMULTI IFF_ALLMULTI
+#define IFF_MASTER IFF_MASTER
+#define IFF_SLAVE IFF_SLAVE
+#define IFF_MULTICAST IFF_MULTICAST
+#define IFF_PORTSEL IFF_PORTSEL
+#define IFF_AUTOMEDIA IFF_AUTOMEDIA
+#define IFF_DYNAMIC IFF_DYNAMIC
+#define IFF_LOWER_UP IFF_LOWER_UP
+#define IFF_DORMANT IFF_DORMANT
+#define IFF_ECHO IFF_ECHO
#define IFF_VOLATILE (IFF_LOOPBACK|IFF_POINTOPOINT|IFF_BROADCAST|IFF_ECHO|\
IFF_MASTER|IFF_SLAVE|IFF_RUNNING|IFF_LOWER_UP|IFF_DORMANT)
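
A userspace sketch of toggling one of the flags documented above through the classic ioctl path; flags in the @IFF_VOLATILE group are owned by the kernel and are simply read back here.

#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>

/* Turn promiscuous mode on or off for @ifname. */
static int set_promisc(int sock, const char *ifname, int on)
{
        struct ifreq ifr;

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);

        if (ioctl(sock, SIOCGIFFLAGS, &ifr) < 0)
                return -1;

        if (on)
                ifr.ifr_flags |= IFF_PROMISC;
        else
                ifr.ifr_flags &= ~IFF_PROMISC;

        /* Volatile flags (IFF_RUNNING, IFF_LOWER_UP, ...) are preserved by
         * the kernel no matter what is written back here. */
        return ioctl(sock, SIOCSIFFLAGS, &ifr);
}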
-/* Private (from user) interface flags (netdevice->priv_flags). */
-#define IFF_802_1Q_VLAN 0x1 /* 802.1Q VLAN device. */
-#define IFF_EBRIDGE 0x2 /* Ethernet bridging device. */
-#define IFF_SLAVE_INACTIVE 0x4 /* bonding slave not the curr. active */
-#define IFF_MASTER_8023AD 0x8 /* bonding master, 802.3ad. */
-#define IFF_MASTER_ALB 0x10 /* bonding master, balance-alb. */
-#define IFF_BONDING 0x20 /* bonding master or slave */
-#define IFF_SLAVE_NEEDARP 0x40 /* need ARPs for validation */
-#define IFF_ISATAP 0x80 /* ISATAP interface (RFC4214) */
-#define IFF_MASTER_ARPMON 0x100 /* bonding master, ARP mon in use */
-#define IFF_WAN_HDLC 0x200 /* WAN HDLC device */
-#define IFF_XMIT_DST_RELEASE 0x400 /* dev_hard_start_xmit() is allowed to
- * release skb->dst
- */
-#define IFF_DONT_BRIDGE 0x800 /* disallow bridging this ether dev */
-#define IFF_DISABLE_NETPOLL 0x1000 /* disable netpoll at run-time */
-#define IFF_MACVLAN_PORT 0x2000 /* device used as macvlan port */
-#define IFF_BRIDGE_PORT 0x4000 /* device used as bridge port */
-#define IFF_OVS_DATAPATH 0x8000 /* device used as Open vSwitch
- * datapath port */
-#define IFF_TX_SKB_SHARING 0x10000 /* The interface supports sharing
- * skbs on transmit */
-#define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */
-#define IFF_TEAM_PORT 0x40000 /* device used as team port */
-#define IFF_SUPP_NOFCS 0x80000 /* device supports sending custom FCS */
-#define IFF_LIVE_ADDR_CHANGE 0x100000 /* device supports hardware address
- * change when it's running */
-#define IFF_MACVLAN 0x200000 /* Macvlan device */
-
-
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002
diff --git a/include/uapi/linux/if_ether.h b/include/uapi/linux/if_ether.h
index 2ce0f6a..750ba67 100644
--- a/include/uapi/linux/if_ether.h
+++ b/include/uapi/linux/if_ether.h
@@ -68,11 +68,11 @@
#define ETH_P_SLOW 0x8809 /* Slow Protocol. See 802.3ad 43B */
#define ETH_P_WCCP 0x883E /* Web-cache coordination protocol
* defined in draft-wilson-wrec-wccp-v2-00.txt */
-#define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */
-#define ETH_P_PPP_SES 0x8864 /* PPPoE session messages */
#define ETH_P_MPLS_UC 0x8847 /* MPLS Unicast traffic */
#define ETH_P_MPLS_MC 0x8848 /* MPLS Multicast traffic */
#define ETH_P_ATMMPOA 0x884c /* MultiProtocol Over ATM */
+#define ETH_P_PPP_DISC 0x8863 /* PPPoE discovery messages */
+#define ETH_P_PPP_SES 0x8864 /* PPPoE session messages */
#define ETH_P_LINK_CTL 0x886c /* HPNA, wlan link local tunnel */
#define ETH_P_ATMFATE 0x8884 /* Frame-based ATM Transport
* over Ethernet
@@ -89,6 +89,7 @@
#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
#define ETH_P_TDLS 0x890D /* TDLS */
#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
+#define ETH_P_80221 0x8917 /* IEEE 802.21 Media Independent Handover Protocol */
#define ETH_P_QINQ1 0x9100 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_QINQ2 0x9200 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
#define ETH_P_QINQ3 0x9300 /* deprecated QinQ VLAN [ NOT AN OFFICIALLY REGISTERED ID ] */
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index 393c5de..c33a65e 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -120,6 +120,10 @@ struct in_addr {
* this socket to prevent accepting spoofed ones.
*/
#define IP_PMTUDISC_INTERFACE 4
+/* weaker version of IP_PMTUDISC_INTERFACE, which allows packets to get
+ * fragmented if they exceed the interface mtu
+ */
+#define IP_PMTUDISC_OMIT 5
#define IP_MULTICAST_IF 32
#define IP_MULTICAST_TTL 33
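
A minimal sketch of opting a UDP socket in to the new mode; the fallback #define is only needed with userspace headers that predate this patch.

#include <netinet/in.h>
#include <sys/socket.h>

#ifndef IP_PMTUDISC_OMIT
#define IP_PMTUDISC_OMIT 5              /* from the updated <linux/in.h> */
#endif

/* Ignore the cached path MTU; let packets above the interface MTU fragment. */
static int set_pmtudisc_omit(int fd)
{
        int val = IP_PMTUDISC_OMIT;

        return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val));
}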
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index e9a1d2d..0d8e0f0 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -185,6 +185,10 @@ struct in6_flowlabel_req {
* also see comments on IP_PMTUDISC_INTERFACE
*/
#define IPV6_PMTUDISC_INTERFACE 4
+/* weaker version of IPV6_PMTUDISC_INTERFACE, which allows packets to
+ * get fragmented if they exceed the interface mtu
+ */
+#define IPV6_PMTUDISC_OMIT 5
/* Flowlabel */
#define IPV6_FLOWLABEL_MGR 32
diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h
new file mode 100644
index 0000000..bc9abfe
--- /dev/null
+++ b/include/uapi/linux/mpls.h
@@ -0,0 +1,34 @@
+#ifndef _UAPI_MPLS_H
+#define _UAPI_MPLS_H
+
+#include <linux/types.h>
+#include <asm/byteorder.h>
+
+/* Reference: RFC 5462, RFC 3032
+ *
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Label | TC |S| TTL |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * Label: Label Value, 20 bits
+ * TC: Traffic Class field, 3 bits
+ * S: Bottom of Stack, 1 bit
+ * TTL: Time to Live, 8 bits
+ */
+
+struct mpls_label {
+ __be32 entry;
+};
+
+#define MPLS_LS_LABEL_MASK 0xFFFFF000
+#define MPLS_LS_LABEL_SHIFT 12
+#define MPLS_LS_TC_MASK 0x00000E00
+#define MPLS_LS_TC_SHIFT 9
+#define MPLS_LS_S_MASK 0x00000100
+#define MPLS_LS_S_SHIFT 8
+#define MPLS_LS_TTL_MASK 0x000000FF
+#define MPLS_LS_TTL_SHIFT 0
+
+#endif /* _UAPI_MPLS_H */
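
A small sketch decoding one label stack entry with the masks above; @entry is big-endian on the wire, hence the ntohl(), and the mpls_fields holder is invented for illustration.

#include <arpa/inet.h>
#include <linux/mpls.h>

struct mpls_fields {            /* illustrative holder for the decoded fields */
        __u32 label;
        __u8 tc;
        __u8 bos;
        __u8 ttl;
};

static struct mpls_fields mpls_decode(const struct mpls_label *lse)
{
        __u32 entry = ntohl(lse->entry);
        struct mpls_fields f = {
                .label = (entry & MPLS_LS_LABEL_MASK) >> MPLS_LS_LABEL_SHIFT,
                .tc    = (entry & MPLS_LS_TC_MASK)    >> MPLS_LS_TC_SHIFT,
                .bos   = (entry & MPLS_LS_S_MASK)     >> MPLS_LS_S_SHIFT,
                .ttl   = (entry & MPLS_LS_TTL_MASK)   >> MPLS_LS_TTL_SHIFT,
        };

        return f;
}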
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 91054fd..a12e6ca 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -418,8 +418,18 @@
* %NL80211_ATTR_SSID attribute, and can optionally specify the association
* IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_USE_MFP,
* %NL80211_ATTR_MAC, %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT,
- * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE and
- * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT.
+ * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE,
+ * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT, %NL80211_ATTR_MAC_HINT, and
+ * %NL80211_ATTR_WIPHY_FREQ_HINT.
+ * If included, %NL80211_ATTR_MAC and %NL80211_ATTR_WIPHY_FREQ are
+ * restrictions on BSS selection, i.e., they effectively prevent roaming
+ * within the ESS. %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT
+ * can be included to provide a recommendation of the initial BSS while
+ * allowing the driver to roam to other BSSes within the ESS and also to
+ * ignore this recommendation if the indicated BSS is not ideal. Only one
 * set of BSSID, frequency parameters is used (i.e., either the enforcing
+ * %NL80211_ATTR_MAC,%NL80211_ATTR_WIPHY_FREQ or the less strict
+ * %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT).
* Background scan period can optionally be
* specified in %NL80211_ATTR_BG_SCAN_PERIOD,
* if not specified default background scan configuration
@@ -1555,6 +1565,16 @@ enum nl80211_commands {
* data is in the format defined for the payload of the QoS Map Set element
* in IEEE Std 802.11-2012, 8.4.2.97.
*
+ * @NL80211_ATTR_MAC_HINT: MAC address recommendation as initial BSS
+ * @NL80211_ATTR_WIPHY_FREQ_HINT: frequency of the recommended initial BSS
+ *
+ * @NL80211_ATTR_MAX_AP_ASSOC_STA: Device attribute that indicates how many
+ * associated stations are supported in AP mode (including P2P GO); u32.
+ * Since drivers may not have a fixed limit on the maximum number (e.g.,
+ * other concurrent operations may affect this), drivers are allowed to
+ * advertise values that cannot always be met. In such cases, an attempt
+ * to add a new station entry with @NL80211_CMD_NEW_STATION may fail.
+ *
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
*/
@@ -1883,6 +1903,11 @@ enum nl80211_attrs {
NL80211_ATTR_QOS_MAP,
+ NL80211_ATTR_MAC_HINT,
+ NL80211_ATTR_WIPHY_FREQ_HINT,
+
+ NL80211_ATTR_MAX_AP_ASSOC_STA,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2412,7 +2437,10 @@ enum nl80211_reg_type {
 * in KHz. This is not a center frequency but an actual regulatory
* band edge.
* @NL80211_ATTR_FREQ_RANGE_MAX_BW: maximum allowed bandwidth for this
- * frequency range, in KHz.
+ * frequency range, in KHz. If not present or 0, maximum available
+ * bandwidth should be calculated based on contiguous rules and wider
+ * channels will be allowed to cross multiple contiguous/overlapping
+ * frequency ranges.
* @NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN: the maximum allowed antenna gain
* for a given frequency range. The value is in mBi (100 * dBi).
* If you don't have one then don't send this.
@@ -2442,9 +2470,15 @@ enum nl80211_reg_rule_attr {
* enum nl80211_sched_scan_match_attr - scheduled scan match attributes
* @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved
* @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching,
- * only report BSS with matching SSID.
+ * only report BSS with matching SSID.
* @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI: RSSI threshold (in dBm) for reporting a
- * BSS in scan results. Filtering is turned off if not specified.
+ * BSS in scan results. Filtering is turned off if not specified. Note that
+ * if this attribute is in a match set of its own, then it is treated as
+ * the default value for all matchsets with an SSID, rather than being a
+ * matchset of its own without an RSSI filter. This is due to problems with
+ * how this API was implemented in the past. Also, due to the same problem,
+ * the only way to create a matchset with only an RSSI filter (with this
+ * attribute) is if there's only a single matchset with the RSSI attribute.
* @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
* attribute number currently defined
* @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -3131,6 +3165,7 @@ enum nl80211_key_attributes {
* in an array of MCS numbers.
* @NL80211_TXRATE_VHT: VHT rates allowed for TX rate selection,
* see &struct nl80211_txrate_vht
+ * @NL80211_TXRATE_GI: configure GI, see &enum nl80211_txrate_gi
* @__NL80211_TXRATE_AFTER_LAST: internal
* @NL80211_TXRATE_MAX: highest TX rate attribute
*/
@@ -3139,6 +3174,7 @@ enum nl80211_tx_rate_attributes {
NL80211_TXRATE_LEGACY,
NL80211_TXRATE_HT,
NL80211_TXRATE_VHT,
+ NL80211_TXRATE_GI,
/* keep last */
__NL80211_TXRATE_AFTER_LAST,
@@ -3156,6 +3192,12 @@ struct nl80211_txrate_vht {
__u16 mcs[NL80211_VHT_NSS_MAX];
};
+enum nl80211_txrate_gi {
+ NL80211_TXRATE_DEFAULT_GI,
+ NL80211_TXRATE_FORCE_SGI,
+ NL80211_TXRATE_FORCE_LGI,
+};
+
/**
* enum nl80211_band - Frequency band
* @NL80211_BAND_2GHZ: 2.4 GHz ISM band
diff --git a/include/uapi/linux/pfkeyv2.h b/include/uapi/linux/pfkeyv2.h
index 0b80c80..ada7f01 100644
--- a/include/uapi/linux/pfkeyv2.h
+++ b/include/uapi/linux/pfkeyv2.h
@@ -235,6 +235,18 @@ struct sadb_x_kmaddress {
} __attribute__((packed));
/* sizeof(struct sadb_x_kmaddress) == 8 */
+/* To specify the SA dump filter */
+struct sadb_x_filter {
+ __u16 sadb_x_filter_len;
+ __u16 sadb_x_filter_exttype;
+ __u32 sadb_x_filter_saddr[4];
+ __u32 sadb_x_filter_daddr[4];
+ __u16 sadb_x_filter_family;
+ __u8 sadb_x_filter_splen;
+ __u8 sadb_x_filter_dplen;
+} __attribute__((packed));
+/* sizeof(struct sadb_x_filter) == 40 */
+
/* Message types */
#define SADB_RESERVED 0
#define SADB_GETSPI 1
@@ -358,7 +370,8 @@ struct sadb_x_kmaddress {
#define SADB_X_EXT_SEC_CTX 24
/* Used with MIGRATE to pass @ to IKE for negotiation */
#define SADB_X_EXT_KMADDRESS 25
-#define SADB_EXT_MAX 25
+#define SADB_X_EXT_FILTER 26
+#define SADB_EXT_MAX 26
/* Identity Extension values */
#define SADB_IDENTTYPE_RESERVED 0
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index bbaba22..df40137 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -252,6 +252,7 @@ enum
LINUX_MIB_TCPCHALLENGEACK, /* TCPChallengeACK */
LINUX_MIB_TCPSYNCHALLENGE, /* TCPSYNChallenge */
LINUX_MIB_TCPFASTOPENACTIVE, /* TCPFastOpenActive */
+ LINUX_MIB_TCPFASTOPENACTIVEFAIL, /* TCPFastOpenActiveFail */
LINUX_MIB_TCPFASTOPENPASSIVE, /* TCPFastOpenPassive*/
LINUX_MIB_TCPFASTOPENPASSIVEFAIL, /* TCPFastOpenPassiveFail */
LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */
@@ -259,6 +260,11 @@ enum
LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */
LINUX_MIB_TCPAUTOCORKING, /* TCPAutoCorking */
+ LINUX_MIB_TCPFROMZEROWINDOWADV, /* TCPFromZeroWindowAdv */
+ LINUX_MIB_TCPTOZEROWINDOWADV, /* TCPToZeroWindowAdv */
+ LINUX_MIB_TCPWANTZEROWINDOWADV, /* TCPWantZeroWindowAdv */
+ LINUX_MIB_TCPSYNRETRANS, /* TCPSynRetrans */
+ LINUX_MIB_TCPORIGDATASENT, /* TCPOrigDataSent */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 377f1e5..3b97183 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -186,6 +186,9 @@ struct tcp_info {
__u32 tcpi_rcv_space;
__u32 tcpi_total_retrans;
+
+ __u64 tcpi_pacing_rate;
+ __u64 tcpi_max_pacing_rate;
};
/* for TCP_MD5SIG socket option */
diff --git a/include/uapi/linux/tcp_metrics.h b/include/uapi/linux/tcp_metrics.h
index 54a37b1..9353392 100644
--- a/include/uapi/linux/tcp_metrics.h
+++ b/include/uapi/linux/tcp_metrics.h
@@ -11,12 +11,15 @@
#define TCP_METRICS_GENL_VERSION 0x1
enum tcp_metric_index {
- TCP_METRIC_RTT,
- TCP_METRIC_RTTVAR,
+ TCP_METRIC_RTT, /* in ms units */
+ TCP_METRIC_RTTVAR, /* in ms units */
TCP_METRIC_SSTHRESH,
TCP_METRIC_CWND,
TCP_METRIC_REORDERING,
+ TCP_METRIC_RTT_US, /* in usec units */
+ TCP_METRIC_RTTVAR_US, /* in usec units */
+
/* Always last. */
__TCP_METRIC_MAX,
};
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index a8cd6a4..6550c67 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -298,6 +298,8 @@ enum xfrm_attr_type_t {
XFRMA_TFCPAD, /* __u32 */
XFRMA_REPLAY_ESN_VAL, /* struct xfrm_replay_esn */
XFRMA_SA_EXTRA_FLAGS, /* __u32 */
+ XFRMA_PROTO, /* __u8 */
+ XFRMA_FILTER, /* struct xfrm_filter */
__XFRMA_MAX
#define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -474,6 +476,14 @@ struct xfrm_user_mapping {
__be16 new_sport;
};
+struct xfrm_filter {
+ xfrm_address_t saddr;
+ xfrm_address_t daddr;
+ __u16 family;
+ __u8 splen;
+ __u8 dplen;
+};
+
#ifndef __KERNEL__
/* backwards compatibility for userspace */
#define XFRMGRP_ACQUIRE 1
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index de51c48..566adbf 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -556,7 +556,7 @@ static const struct net_device_ops vlan_netdev_ops;
static int vlan_dev_init(struct net_device *dev)
{
struct net_device *real_dev = vlan_dev_priv(dev)->real_dev;
- int subclass = 0, i;
+ int subclass = 0;
netif_carrier_off(dev);
@@ -606,17 +606,10 @@ static int vlan_dev_init(struct net_device *dev)
vlan_dev_set_lockdep_class(dev, subclass);
- vlan_dev_priv(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+ vlan_dev_priv(dev)->vlan_pcpu_stats = netdev_alloc_pcpu_stats(struct vlan_pcpu_stats);
if (!vlan_dev_priv(dev)->vlan_pcpu_stats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct vlan_pcpu_stats *vlan_stat;
- vlan_stat = per_cpu_ptr(vlan_dev_priv(dev)->vlan_pcpu_stats, i);
- u64_stats_init(&vlan_stat->syncp);
- }
-
-
return 0;
}
diff --git a/net/appletalk/aarp.c b/net/appletalk/aarp.c
index d27b86d..d1c55d8 100644
--- a/net/appletalk/aarp.c
+++ b/net/appletalk/aarp.c
@@ -926,7 +926,7 @@ static struct aarp_entry *iter_next(struct aarp_iter_state *iter, loff_t *pos)
struct aarp_entry *entry;
rescan:
- while(ct < AARP_HASH_SIZE) {
+ while (ct < AARP_HASH_SIZE) {
for (entry = table[ct]; entry; entry = entry->next) {
if (!pos || ++off == *pos) {
iter->table = table;
@@ -995,7 +995,7 @@ static const char *dt2str(unsigned long ticks)
{
static char buf[32];
- sprintf(buf, "%ld.%02ld", ticks / HZ, ((ticks % HZ) * 100 ) / HZ);
+ sprintf(buf, "%ld.%02ld", ticks / HZ, ((ticks % HZ) * 100) / HZ);
return buf;
}
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 02806c6..786ee2f 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -293,7 +293,7 @@ static int atif_probe_device(struct atalk_iface *atif)
/* Perform AARP probing for a proxy address */
static int atif_proxy_probe_device(struct atalk_iface *atif,
- struct atalk_addr* proxy_addr)
+ struct atalk_addr *proxy_addr)
{
int netrange = ntohs(atif->nets.nr_lastnet) -
ntohs(atif->nets.nr_firstnet) + 1;
@@ -581,7 +581,7 @@ out:
}
/* Delete a route. Find it and discard it */
-static int atrtr_delete(struct atalk_addr * addr)
+static int atrtr_delete(struct atalk_addr *addr)
{
struct atalk_route **r = &atalk_routes;
int retval = 0;
@@ -936,11 +936,11 @@ static unsigned long atalk_sum_skb(const struct sk_buff *skb, int offset,
int i, copy;
/* checksum stuff in header space */
- if ( (copy = start - offset) > 0) {
+ if ((copy = start - offset) > 0) {
if (copy > len)
copy = len;
sum = atalk_sum_partial(skb->data + offset, copy, sum);
- if ( (len -= copy) == 0)
+ if ((len -= copy) == 0)
return sum;
offset += copy;
@@ -1151,7 +1151,7 @@ static int atalk_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
at->src_net = addr->sat_addr.s_net = ap->s_net;
- at->src_node = addr->sat_addr.s_node= ap->s_node;
+ at->src_node = addr->sat_addr.s_node = ap->s_node;
} else {
err = -EADDRNOTAVAIL;
if (!atalk_find_interface(addr->sat_addr.s_net,
@@ -1790,53 +1790,53 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
void __user *argp = (void __user *)arg;
switch (cmd) {
- /* Protocol layer */
- case TIOCOUTQ: {
- long amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
+ /* Protocol layer */
+ case TIOCOUTQ: {
+ long amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
- if (amount < 0)
- amount = 0;
- rc = put_user(amount, (int __user *)argp);
- break;
- }
- case TIOCINQ: {
- /*
- * These two are safe on a single CPU system as only
- * user tasks fiddle here
- */
- struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
- long amount = 0;
+ if (amount < 0)
+ amount = 0;
+ rc = put_user(amount, (int __user *)argp);
+ break;
+ }
+ case TIOCINQ: {
+ /*
+ * These two are safe on a single CPU system as only
+ * user tasks fiddle here
+ */
+ struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
+ long amount = 0;
- if (skb)
- amount = skb->len - sizeof(struct ddpehdr);
- rc = put_user(amount, (int __user *)argp);
- break;
- }
- case SIOCGSTAMP:
- rc = sock_get_timestamp(sk, argp);
- break;
- case SIOCGSTAMPNS:
- rc = sock_get_timestampns(sk, argp);
- break;
- /* Routing */
- case SIOCADDRT:
- case SIOCDELRT:
- rc = -EPERM;
- if (capable(CAP_NET_ADMIN))
- rc = atrtr_ioctl(cmd, argp);
- break;
- /* Interface */
- case SIOCGIFADDR:
- case SIOCSIFADDR:
- case SIOCGIFBRDADDR:
- case SIOCATALKDIFADDR:
- case SIOCDIFADDR:
- case SIOCSARP: /* proxy AARP */
- case SIOCDARP: /* proxy AARP */
- rtnl_lock();
- rc = atif_ioctl(cmd, argp);
- rtnl_unlock();
- break;
+ if (skb)
+ amount = skb->len - sizeof(struct ddpehdr);
+ rc = put_user(amount, (int __user *)argp);
+ break;
+ }
+ case SIOCGSTAMP:
+ rc = sock_get_timestamp(sk, argp);
+ break;
+ case SIOCGSTAMPNS:
+ rc = sock_get_timestampns(sk, argp);
+ break;
+ /* Routing */
+ case SIOCADDRT:
+ case SIOCDELRT:
+ rc = -EPERM;
+ if (capable(CAP_NET_ADMIN))
+ rc = atrtr_ioctl(cmd, argp);
+ break;
+ /* Interface */
+ case SIOCGIFADDR:
+ case SIOCSIFADDR:
+ case SIOCGIFBRDADDR:
+ case SIOCATALKDIFADDR:
+ case SIOCDIFADDR:
+ case SIOCSARP: /* proxy AARP */
+ case SIOCDARP: /* proxy AARP */
+ rtnl_lock();
+ rc = atif_ioctl(cmd, argp);
+ rtnl_unlock();
+ break;
}
return rc;
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 55cf226..d7fafc1 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -389,8 +389,6 @@ out:
batadv_neigh_ifinfo_free_ref(router_gw_tq);
if (router_orig_tq)
batadv_neigh_ifinfo_free_ref(router_orig_tq);
-
- return;
}
/**
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index 63f0455..b063050 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -88,18 +88,11 @@ out:
static int br_dev_init(struct net_device *dev)
{
struct net_bridge *br = netdev_priv(dev);
- int i;
- br->stats = alloc_percpu(struct pcpu_sw_netstats);
+ br->stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!br->stats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *br_dev_stats;
- br_dev_stats = per_cpu_ptr(br->stats, i);
- u64_stats_init(&br_dev_stats->syncp);
- }
-
return 0;
}
@@ -374,7 +367,7 @@ void br_dev_setup(struct net_device *dev)
br->bridge_id.prio[0] = 0x80;
br->bridge_id.prio[1] = 0x00;
- memcpy(br->group_addr, eth_reserved_addr_base, ETH_ALEN);
+ ether_addr_copy(br->group_addr, eth_reserved_addr_base);
br->stp_enabled = BR_NO_STP;
br->group_fwd_mask = BR_GROUPFWD_DEFAULT;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index ef66365..c97c3c8 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -363,7 +363,7 @@ static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br,
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
- memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(eth->h_source, br->dev->dev_addr);
eth->h_dest[0] = 1;
eth->h_dest[1] = 0;
eth->h_dest[2] = 0x5e;
@@ -433,7 +433,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
skb_reset_mac_header(skb);
eth = eth_hdr(skb);
- memcpy(eth->h_source, br->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(eth->h_source, br->dev->dev_addr);
eth->h_proto = htons(ETH_P_IPV6);
skb_put(skb, sizeof(*eth));
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index b008c59..df0f114 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -506,7 +506,7 @@ bridged_dnat:
1);
return 0;
}
- memcpy(eth_hdr(skb)->h_dest, dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(eth_hdr(skb)->h_dest, dev->dev_addr);
skb->pkt_type = PACKET_HOST;
}
} else {
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 3ba11bc..e1ca1dc 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -46,12 +46,12 @@ typedef __u16 port_id;
struct bridge_id
{
unsigned char prio[2];
- unsigned char addr[6];
+ unsigned char addr[ETH_ALEN];
};
struct mac_addr
{
- unsigned char addr[6];
+ unsigned char addr[ETH_ALEN];
};
struct br_ip
diff --git a/net/bridge/netfilter/ebt_among.c b/net/bridge/netfilter/ebt_among.c
index 3fb3c84..9024283 100644
--- a/net/bridge/netfilter/ebt_among.c
+++ b/net/bridge/netfilter/ebt_among.c
@@ -28,7 +28,7 @@ static bool ebt_mac_wormhash_contains(const struct ebt_mac_wormhash *wh,
uint32_t cmp[2] = { 0, 0 };
int key = ((const unsigned char *)mac)[5];
- memcpy(((char *) cmp) + 2, mac, ETH_ALEN);
+ ether_addr_copy(((char *) cmp) + 2, mac);
start = wh->table[key];
limit = wh->table[key + 1];
if (ip) {
diff --git a/net/bridge/netfilter/ebt_dnat.c b/net/bridge/netfilter/ebt_dnat.c
index c59f7bf..4e0b0c3 100644
--- a/net/bridge/netfilter/ebt_dnat.c
+++ b/net/bridge/netfilter/ebt_dnat.c
@@ -22,7 +22,7 @@ ebt_dnat_tg(struct sk_buff *skb, const struct xt_action_param *par)
if (!skb_make_writable(skb, 0))
return EBT_DROP;
- memcpy(eth_hdr(skb)->h_dest, info->mac, ETH_ALEN);
+ ether_addr_copy(eth_hdr(skb)->h_dest, info->mac);
return info->target;
}
diff --git a/net/bridge/netfilter/ebt_redirect.c b/net/bridge/netfilter/ebt_redirect.c
index 46624bb..20396499 100644
--- a/net/bridge/netfilter/ebt_redirect.c
+++ b/net/bridge/netfilter/ebt_redirect.c
@@ -25,10 +25,10 @@ ebt_redirect_tg(struct sk_buff *skb, const struct xt_action_param *par)
if (par->hooknum != NF_BR_BROUTING)
/* rcu_read_lock()ed by nf_hook_slow */
- memcpy(eth_hdr(skb)->h_dest,
- br_port_get_rcu(par->in)->br->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(eth_hdr(skb)->h_dest,
+ br_port_get_rcu(par->in)->br->dev->dev_addr);
else
- memcpy(eth_hdr(skb)->h_dest, par->in->dev_addr, ETH_ALEN);
+ ether_addr_copy(eth_hdr(skb)->h_dest, par->in->dev_addr);
skb->pkt_type = PACKET_HOST;
return info->target;
}
diff --git a/net/bridge/netfilter/ebt_snat.c b/net/bridge/netfilter/ebt_snat.c
index 0f6b118..e56ccd0 100644
--- a/net/bridge/netfilter/ebt_snat.c
+++ b/net/bridge/netfilter/ebt_snat.c
@@ -24,7 +24,7 @@ ebt_snat_tg(struct sk_buff *skb, const struct xt_action_param *par)
if (!skb_make_writable(skb, 0))
return EBT_DROP;
- memcpy(eth_hdr(skb)->h_source, info->mac, ETH_ALEN);
+ ether_addr_copy(eth_hdr(skb)->h_source, info->mac);
if (!(info->target & NAT_ARP_BIT) &&
eth_hdr(skb)->h_proto == htons(ETH_P_ARP)) {
const struct arphdr *ap;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 0676f2b..82750f9 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2082,7 +2082,6 @@ bad:
pr_err("osdc handle_map corrupt msg\n");
ceph_msg_dump(msg);
up_write(&osdc->map_sem);
- return;
}
/*
@@ -2281,7 +2280,6 @@ done_err:
bad:
pr_err("osdc handle_watch_notify corrupt msg\n");
- return;
}
/*
diff --git a/net/core/flow.c b/net/core/flow.c
index dfa602c..344a184 100644
--- a/net/core/flow.c
+++ b/net/core/flow.c
@@ -24,6 +24,7 @@
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>
+#include <net/net_namespace.h>
struct flow_cache_entry {
union {
@@ -38,37 +39,12 @@ struct flow_cache_entry {
struct flow_cache_object *object;
};
-struct flow_cache_percpu {
- struct hlist_head *hash_table;
- int hash_count;
- u32 hash_rnd;
- int hash_rnd_recalc;
- struct tasklet_struct flush_tasklet;
-};
-
struct flow_flush_info {
struct flow_cache *cache;
atomic_t cpuleft;
struct completion completion;
};
-struct flow_cache {
- u32 hash_shift;
- struct flow_cache_percpu __percpu *percpu;
- struct notifier_block hotcpu_notifier;
- int low_watermark;
- int high_watermark;
- struct timer_list rnd_timer;
-};
-
-atomic_t flow_cache_genid = ATOMIC_INIT(0);
-EXPORT_SYMBOL(flow_cache_genid);
-static struct flow_cache flow_cache_global;
-static struct kmem_cache *flow_cachep __read_mostly;
-
-static DEFINE_SPINLOCK(flow_cache_gc_lock);
-static LIST_HEAD(flow_cache_gc_list);
-
#define flow_cache_hash_size(cache) (1 << (cache)->hash_shift)
#define FLOW_HASH_RND_PERIOD (10 * 60 * HZ)
@@ -84,46 +60,50 @@ static void flow_cache_new_hashrnd(unsigned long arg)
add_timer(&fc->rnd_timer);
}
-static int flow_entry_valid(struct flow_cache_entry *fle)
+static int flow_entry_valid(struct flow_cache_entry *fle,
+ struct netns_xfrm *xfrm)
{
- if (atomic_read(&flow_cache_genid) != fle->genid)
+ if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
return 0;
if (fle->object && !fle->object->ops->check(fle->object))
return 0;
return 1;
}
-static void flow_entry_kill(struct flow_cache_entry *fle)
+static void flow_entry_kill(struct flow_cache_entry *fle,
+ struct netns_xfrm *xfrm)
{
if (fle->object)
fle->object->ops->delete(fle->object);
- kmem_cache_free(flow_cachep, fle);
+ kmem_cache_free(xfrm->flow_cachep, fle);
}
static void flow_cache_gc_task(struct work_struct *work)
{
struct list_head gc_list;
struct flow_cache_entry *fce, *n;
+ struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
+ flow_cache_gc_work);
INIT_LIST_HEAD(&gc_list);
- spin_lock_bh(&flow_cache_gc_lock);
- list_splice_tail_init(&flow_cache_gc_list, &gc_list);
- spin_unlock_bh(&flow_cache_gc_lock);
+ spin_lock_bh(&xfrm->flow_cache_gc_lock);
+ list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
+ spin_unlock_bh(&xfrm->flow_cache_gc_lock);
list_for_each_entry_safe(fce, n, &gc_list, u.gc_list)
- flow_entry_kill(fce);
+ flow_entry_kill(fce, xfrm);
}
-static DECLARE_WORK(flow_cache_gc_work, flow_cache_gc_task);
static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
- int deleted, struct list_head *gc_list)
+ int deleted, struct list_head *gc_list,
+ struct netns_xfrm *xfrm)
{
if (deleted) {
fcp->hash_count -= deleted;
- spin_lock_bh(&flow_cache_gc_lock);
- list_splice_tail(gc_list, &flow_cache_gc_list);
- spin_unlock_bh(&flow_cache_gc_lock);
- schedule_work(&flow_cache_gc_work);
+ spin_lock_bh(&xfrm->flow_cache_gc_lock);
+ list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
+ spin_unlock_bh(&xfrm->flow_cache_gc_lock);
+ schedule_work(&xfrm->flow_cache_gc_work);
}
}
@@ -135,6 +115,8 @@ static void __flow_cache_shrink(struct flow_cache *fc,
struct hlist_node *tmp;
LIST_HEAD(gc_list);
int i, deleted = 0;
+ struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
+ flow_cache_global);
for (i = 0; i < flow_cache_hash_size(fc); i++) {
int saved = 0;
@@ -142,7 +124,7 @@ static void __flow_cache_shrink(struct flow_cache *fc,
hlist_for_each_entry_safe(fle, tmp,
&fcp->hash_table[i], u.hlist) {
if (saved < shrink_to &&
- flow_entry_valid(fle)) {
+ flow_entry_valid(fle, xfrm)) {
saved++;
} else {
deleted++;
@@ -152,7 +134,7 @@ static void __flow_cache_shrink(struct flow_cache *fc,
}
}
- flow_cache_queue_garbage(fcp, deleted, &gc_list);
+ flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
}
static void flow_cache_shrink(struct flow_cache *fc,
@@ -208,7 +190,7 @@ struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
flow_resolve_t resolver, void *ctx)
{
- struct flow_cache *fc = &flow_cache_global;
+ struct flow_cache *fc = &net->xfrm.flow_cache_global;
struct flow_cache_percpu *fcp;
struct flow_cache_entry *fle, *tfle;
struct flow_cache_object *flo;
@@ -248,7 +230,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
if (fcp->hash_count > fc->high_watermark)
flow_cache_shrink(fc, fcp);
- fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
+ fle = kmem_cache_alloc(net->xfrm.flow_cachep, GFP_ATOMIC);
if (fle) {
fle->net = net;
fle->family = family;
@@ -258,7 +240,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
fcp->hash_count++;
}
- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
+ } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
flo = fle->object;
if (!flo)
goto ret_object;
@@ -279,7 +261,7 @@ nocache:
}
flo = resolver(net, key, family, dir, flo, ctx);
if (fle) {
- fle->genid = atomic_read(&flow_cache_genid);
+ fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
if (!IS_ERR(flo))
fle->object = flo;
else
@@ -303,12 +285,14 @@ static void flow_cache_flush_tasklet(unsigned long data)
struct hlist_node *tmp;
LIST_HEAD(gc_list);
int i, deleted = 0;
+ struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
+ flow_cache_global);
fcp = this_cpu_ptr(fc->percpu);
for (i = 0; i < flow_cache_hash_size(fc); i++) {
hlist_for_each_entry_safe(fle, tmp,
&fcp->hash_table[i], u.hlist) {
- if (flow_entry_valid(fle))
+ if (flow_entry_valid(fle, xfrm))
continue;
deleted++;
@@ -317,7 +301,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
}
}
- flow_cache_queue_garbage(fcp, deleted, &gc_list);
+ flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
if (atomic_dec_and_test(&info->cpuleft))
complete(&info->completion);
@@ -351,10 +335,9 @@ static void flow_cache_flush_per_cpu(void *data)
tasklet_schedule(tasklet);
}
-void flow_cache_flush(void)
+void flow_cache_flush(struct net *net)
{
struct flow_flush_info info;
- static DEFINE_MUTEX(flow_flush_sem);
cpumask_var_t mask;
int i, self;
@@ -365,8 +348,8 @@ void flow_cache_flush(void)
/* Don't want cpus going down or up during this. */
get_online_cpus();
- mutex_lock(&flow_flush_sem);
- info.cache = &flow_cache_global;
+ mutex_lock(&net->xfrm.flow_flush_sem);
+ info.cache = &net->xfrm.flow_cache_global;
for_each_online_cpu(i)
if (!flow_cache_percpu_empty(info.cache, i))
cpumask_set_cpu(i, mask);
@@ -386,21 +369,23 @@ void flow_cache_flush(void)
wait_for_completion(&info.completion);
done:
- mutex_unlock(&flow_flush_sem);
+ mutex_unlock(&net->xfrm.flow_flush_sem);
put_online_cpus();
free_cpumask_var(mask);
}
static void flow_cache_flush_task(struct work_struct *work)
{
- flow_cache_flush();
-}
+ struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
+ flow_cache_gc_work);
+ struct net *net = container_of(xfrm, struct net, xfrm);
-static DECLARE_WORK(flow_cache_flush_work, flow_cache_flush_task);
+ flow_cache_flush(net);
+}
-void flow_cache_flush_deferred(void)
+void flow_cache_flush_deferred(struct net *net)
{
- schedule_work(&flow_cache_flush_work);
+ schedule_work(&net->xfrm.flow_cache_flush_work);
}
static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
@@ -425,7 +410,8 @@ static int flow_cache_cpu(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
- struct flow_cache *fc = container_of(nfb, struct flow_cache, hotcpu_notifier);
+ struct flow_cache *fc = container_of(nfb, struct flow_cache,
+ hotcpu_notifier);
int res, cpu = (unsigned long) hcpu;
struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
@@ -444,9 +430,20 @@ static int flow_cache_cpu(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static int __init flow_cache_init(struct flow_cache *fc)
+int flow_cache_init(struct net *net)
{
int i;
+ struct flow_cache *fc = &net->xfrm.flow_cache_global;
+
+ /* Initialize per-net flow cache global variables here */
+ net->xfrm.flow_cachep = kmem_cache_create("flow_cache",
+ sizeof(struct flow_cache_entry),
+ 0, SLAB_PANIC, NULL);
+ spin_lock_init(&net->xfrm.flow_cache_gc_lock);
+ INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
+ INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
+ INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
+ mutex_init(&net->xfrm.flow_flush_sem);
fc->hash_shift = 10;
fc->low_watermark = 2 * flow_cache_hash_size(fc);
@@ -484,14 +481,4 @@ err:
return -ENOMEM;
}
-
-static int __init flow_cache_init_global(void)
-{
- flow_cachep = kmem_cache_create("flow_cache",
- sizeof(struct flow_cache_entry),
- 0, SLAB_PANIC, NULL);
-
- return flow_cache_init(&flow_cache_global);
-}
-
-module_init(flow_cache_init_global);
+EXPORT_SYMBOL(flow_cache_init);
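
The hunks above move the flow cache from a single global instance into per-namespace state under net->xfrm and export flow_cache_init(). Below is a minimal sketch of how a per-namespace caller could use that export; apart from flow_cache_init() itself, every name here is a hypothetical illustration, not part of this patch.

/* Minimal sketch, not part of this patch: a pernet init hook that sets up
 * the per-namespace flow cache via the newly exported flow_cache_init().
 * example_xfrm_pernet_init and example_xfrm_net_ops are hypothetical names.
 */
#include <net/net_namespace.h>
#include <net/flow.h>

static int __net_init example_xfrm_pernet_init(struct net *net)
{
	/* Replaces the old module_init-time global flow cache setup. */
	return flow_cache_init(net);
}

static struct pernet_operations example_xfrm_net_ops = {
	.init = example_xfrm_pernet_init,
};
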
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index e161290..8f8a96e 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -836,10 +836,10 @@ out:
static __inline__ int neigh_max_probes(struct neighbour *n)
{
struct neigh_parms *p = n->parms;
- return (n->nud_state & NUD_PROBE) ?
- NEIGH_VAR(p, UCAST_PROBES) :
- NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES) +
- NEIGH_VAR(p, MCAST_PROBES);
+ int max_probes = NEIGH_VAR(p, UCAST_PROBES) + NEIGH_VAR(p, APP_PROBES);
+ if (!(n->nud_state & NUD_PROBE))
+ max_probes += NEIGH_VAR(p, MCAST_PROBES);
+ return max_probes;
}
static void neigh_invalidate(struct neighbour *neigh)
@@ -945,6 +945,7 @@ static void neigh_timer_handler(unsigned long arg)
neigh->nud_state = NUD_FAILED;
notify = 1;
neigh_invalidate(neigh);
+ goto out;
}
if (neigh->nud_state & NUD_IN_TIMER) {
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 9388624..daed9a6 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -104,6 +104,7 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
}
NETDEVICE_SHOW_RO(dev_id, fmt_hex);
+NETDEVICE_SHOW_RO(dev_port, fmt_dec);
NETDEVICE_SHOW_RO(addr_assign_type, fmt_dec);
NETDEVICE_SHOW_RO(addr_len, fmt_dec);
NETDEVICE_SHOW_RO(iflink, fmt_dec);
@@ -373,6 +374,7 @@ static struct attribute *net_class_attrs[] = {
&dev_attr_netdev_group.attr,
&dev_attr_type.attr,
&dev_attr_dev_id.attr,
+ &dev_attr_dev_port.attr,
&dev_attr_iflink.attr,
&dev_attr_ifindex.attr,
&dev_attr_addr_assign_type.attr,
@@ -996,15 +998,12 @@ static struct attribute_group dql_group = {
#endif /* CONFIG_BQL */
#ifdef CONFIG_XPS
-static inline unsigned int get_netdev_queue_index(struct netdev_queue *queue)
+static unsigned int get_netdev_queue_index(struct netdev_queue *queue)
{
struct net_device *dev = queue->dev;
- int i;
-
- for (i = 0; i < dev->num_tx_queues; i++)
- if (queue == &dev->_tx[i])
- break;
+ unsigned int i;
+ i = queue - dev->_tx;
BUG_ON(i >= dev->num_tx_queues);
return i;
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index fdac61c..d0dac57 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -476,23 +476,22 @@ static int pgctrl_show(struct seq_file *seq, void *v)
static ssize_t pgctrl_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- int err = 0;
char data[128];
struct pktgen_net *pn = net_generic(current->nsproxy->net_ns, pg_net_id);
- if (!capable(CAP_NET_ADMIN)) {
- err = -EPERM;
- goto out;
- }
+ if (!capable(CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (count == 0)
+ return -EINVAL;
if (count > sizeof(data))
count = sizeof(data);
- if (copy_from_user(data, buf, count)) {
- err = -EFAULT;
- goto out;
- }
- data[count - 1] = 0; /* Make string */
+ if (copy_from_user(data, buf, count))
+ return -EFAULT;
+
+ data[count - 1] = 0; /* Strip trailing '\n' and terminate string */
if (!strcmp(data, "stop"))
pktgen_stop_all_threads_ifs(pn);
@@ -506,10 +505,7 @@ static ssize_t pgctrl_write(struct file *file, const char __user *buf,
else
pr_warning("Unknown command: %s\n", data);
- err = count;
-
-out:
- return err;
+ return count;
}
static int pgctrl_open(struct inode *inode, struct file *file)
@@ -1251,7 +1247,13 @@ static ssize_t pktgen_if_write(struct file *file,
"Flag -:%s:- unknown\nAvailable flags, (prepend ! to un-set flag):\n%s",
f,
"IPSRC_RND, IPDST_RND, UDPSRC_RND, UDPDST_RND, "
- "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, IPSEC, NODE_ALLOC\n");
+ "MACSRC_RND, MACDST_RND, TXSIZE_RND, IPV6, "
+ "MPLS_RND, VID_RND, SVID_RND, FLOW_SEQ, "
+ "QUEUE_MAP_RND, QUEUE_MAP_CPU, UDPCSUM, "
+#ifdef CONFIG_XFRM
+ "IPSEC, "
+#endif
+ "NODE_ALLOC\n");
return count;
}
sprintf(pg_result, "OK: flags=0x%x", pkt_dev->flags);
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 4425148..467f326 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -221,5 +221,4 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
out:
spin_unlock_bh(&fastopenq->lock);
sock_put(lsk);
- return;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 1a0dac2..fc122fd 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1121,56 +1121,7 @@ nla_put_failure:
return -EMSGSIZE;
}
-static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
-{
- struct net *net = sock_net(skb->sk);
- int h, s_h;
- int idx = 0, s_idx;
- struct net_device *dev;
- struct hlist_head *head;
- struct nlattr *tb[IFLA_MAX+1];
- u32 ext_filter_mask = 0;
-
- s_h = cb->args[0];
- s_idx = cb->args[1];
-
- rcu_read_lock();
- cb->seq = net->dev_base_seq;
-
- if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
- ifla_policy) >= 0) {
-
- if (tb[IFLA_EXT_MASK])
- ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
- }
-
- for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
- idx = 0;
- head = &net->dev_index_head[h];
- hlist_for_each_entry_rcu(dev, head, index_hlist) {
- if (idx < s_idx)
- goto cont;
- if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq, 0,
- NLM_F_MULTI,
- ext_filter_mask) <= 0)
- goto out;
-
- nl_dump_check_consistent(cb, nlmsg_hdr(skb));
-cont:
- idx++;
- }
- }
-out:
- rcu_read_unlock();
- cb->args[1] = idx;
- cb->args[0] = h;
-
- return skb->len;
-}
-
-const struct nla_policy ifla_policy[IFLA_MAX+1] = {
+static const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_IFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ-1 },
[IFLA_ADDRESS] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
[IFLA_BROADCAST] = { .type = NLA_BINARY, .len = MAX_ADDR_LEN },
@@ -1197,7 +1148,6 @@ const struct nla_policy ifla_policy[IFLA_MAX+1] = {
[IFLA_NUM_RX_QUEUES] = { .type = NLA_U32 },
[IFLA_PHYS_PORT_ID] = { .type = NLA_BINARY, .len = MAX_PHYS_PORT_ID_LEN },
};
-EXPORT_SYMBOL(ifla_policy);
static const struct nla_policy ifla_info_policy[IFLA_INFO_MAX+1] = {
[IFLA_INFO_KIND] = { .type = NLA_STRING },
@@ -1235,6 +1185,61 @@ static const struct nla_policy ifla_port_policy[IFLA_PORT_MAX+1] = {
[IFLA_PORT_RESPONSE] = { .type = NLA_U16, },
};
+static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ int h, s_h;
+ int idx = 0, s_idx;
+ struct net_device *dev;
+ struct hlist_head *head;
+ struct nlattr *tb[IFLA_MAX+1];
+ u32 ext_filter_mask = 0;
+
+ s_h = cb->args[0];
+ s_idx = cb->args[1];
+
+ rcu_read_lock();
+ cb->seq = net->dev_base_seq;
+
+ if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+ ifla_policy) >= 0) {
+
+ if (tb[IFLA_EXT_MASK])
+ ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+ }
+
+ for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
+ idx = 0;
+ head = &net->dev_index_head[h];
+ hlist_for_each_entry_rcu(dev, head, index_hlist) {
+ if (idx < s_idx)
+ goto cont;
+ if (rtnl_fill_ifinfo(skb, dev, RTM_NEWLINK,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, 0,
+ NLM_F_MULTI,
+ ext_filter_mask) <= 0)
+ goto out;
+
+ nl_dump_check_consistent(cb, nlmsg_hdr(skb));
+cont:
+ idx++;
+ }
+ }
+out:
+ rcu_read_unlock();
+ cb->args[1] = idx;
+ cb->args[0] = h;
+
+ return skb->len;
+}
+
+int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len)
+{
+ return nla_parse(tb, IFLA_MAX, head, len, ifla_policy);
+}
+EXPORT_SYMBOL(rtnl_nla_parse_ifla);
+
struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[])
{
struct net *net;
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5d6236d..465a01c 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3278,6 +3278,32 @@ __skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
return elt;
}
+/* Compared with skb_to_sgvec, skb_to_sgvec_nomark only maps the skb to the
+ * given sglist without marking the sg entry that contains the last skb data
+ * as the end. So the caller can manipulate the sg list as it wishes when
+ * appending new data after the first call, without calling sg_unmark_end to
+ * extend the sg list.
+ *
+ * Scenario for using skb_to_sgvec_nomark:
+ * 1. sg_init_table
+ * 2. skb_to_sgvec_nomark(payload1)
+ * 3. skb_to_sgvec_nomark(payload2)
+ *
+ * This is equivalent to:
+ * 1. sg_init_table
+ * 2. skb_to_sgvec(payload1)
+ * 3. sg_unmark_end
+ * 4. skb_to_sgvec(payload2)
+ *
+ * When conditionally mapping multiple payloads, skb_to_sgvec_nomark
+ * is preferable.
+ */
+int skb_to_sgvec_nomark(struct sk_buff *skb, struct scatterlist *sg,
+ int offset, int len)
+{
+ return __skb_to_sgvec(skb, sg, offset, len);
+}
+EXPORT_SYMBOL_GPL(skb_to_sgvec_nomark);
+
int skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len)
{
int nsg = __skb_to_sgvec(skb, sg, offset, len);
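
A short usage sketch of the pattern described in the comment above: map an skb and then append one more buffer to the same scatterlist without calling sg_unmark_end(), as ah_output() does later in this series with the ESN seqhi word. The helper name and its arguments are illustrative assumptions, not part of this patch.

/* Hedged sketch (kernel context assumed): map an skb into a scatterlist and
 * append a trailing buffer without having to call sg_unmark_end().
 * map_skb_and_trailer() is a hypothetical helper, not part of this patch.
 */
#include <linux/skbuff.h>
#include <linux/scatterlist.h>

static void map_skb_and_trailer(struct sk_buff *skb, struct scatterlist *sg,
				int nfrags, void *trailer, unsigned int tlen)
{
	/* One extra slot for the trailer; sg_init_table marks the last slot
	 * as the list end.
	 */
	sg_init_table(sg, nfrags + 1);

	/* Map the skb without marking its final entry as the list end... */
	skb_to_sgvec_nomark(skb, sg, 0, skb->len);

	/* ...so the trailer can simply occupy the next slot. */
	sg_set_buf(sg + nfrags, trailer, tlen);
}
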
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index cac505f..e5302b7 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -209,7 +209,7 @@ static int slave_xmit(struct sk_buff *skb, struct hsr_priv *hsr_priv,
/* Address substitution (IEC62439-3 pp 26, 50): replace mac
* address of outgoing frame with that of the outgoing slave's.
*/
- memcpy(hsr_ethhdr->ethhdr.h_source, skb->dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(hsr_ethhdr->ethhdr.h_source, skb->dev->dev_addr);
return dev_queue_xmit(skb);
}
@@ -346,7 +346,7 @@ static void send_hsr_supervision_frame(struct net_device *hsr_dev, u8 type)
/* Payload: MacAddressA */
hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp));
- memcpy(hsr_sp->MacAddressA, hsr_dev->dev_addr, ETH_ALEN);
+ ether_addr_copy(hsr_sp->MacAddressA, hsr_dev->dev_addr);
dev_queue_xmit(skb);
return;
@@ -493,7 +493,7 @@ static int check_slave_ok(struct net_device *dev)
/* Default multicast address for HSR Supervision frames */
-static const unsigned char def_multicast_addr[ETH_ALEN] = {
+static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
};
@@ -519,7 +519,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
hsr_priv->announce_timer.function = hsr_announce;
hsr_priv->announce_timer.data = (unsigned long) hsr_priv;
- memcpy(hsr_priv->sup_multicast_addr, def_multicast_addr, ETH_ALEN);
+ ether_addr_copy(hsr_priv->sup_multicast_addr, def_multicast_addr);
hsr_priv->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
/* FIXME: should I modify the value of these?
@@ -547,7 +547,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
hsr_dev->features |= NETIF_F_VLAN_CHALLENGED;
/* Set hsr_dev's MAC address to that of mac_slave1 */
- memcpy(hsr_dev->dev_addr, hsr_priv->slave[0]->dev_addr, ETH_ALEN);
+ ether_addr_copy(hsr_dev->dev_addr, hsr_priv->slave[0]->dev_addr);
/* Set required header length */
for (i = 0; i < HSR_MAX_SLAVE; i++) {
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 7ae0d7f6..83e5844 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -108,8 +108,8 @@ int hsr_create_self_node(struct list_head *self_node_db,
if (!node)
return -ENOMEM;
- memcpy(node->MacAddressA, addr_a, ETH_ALEN);
- memcpy(node->MacAddressB, addr_b, ETH_ALEN);
+ ether_addr_copy(node->MacAddressA, addr_a);
+ ether_addr_copy(node->MacAddressB, addr_b);
rcu_read_lock();
oldnode = list_first_or_null_rcu(self_node_db,
@@ -199,7 +199,7 @@ struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
/* Node is known, but frame was received from an unknown
* address. Node is PICS_SUBS capable; merge its AddrB.
*/
- memcpy(node->MacAddressB, hsr_ethsup->ethhdr.h_source, ETH_ALEN);
+ ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source);
node->AddrB_if = dev_idx;
return node;
}
@@ -208,8 +208,8 @@ struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
if (!node)
return NULL;
- memcpy(node->MacAddressA, hsr_sp->MacAddressA, ETH_ALEN);
- memcpy(node->MacAddressB, hsr_ethsup->ethhdr.h_source, ETH_ALEN);
+ ether_addr_copy(node->MacAddressA, hsr_sp->MacAddressA);
+ ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source);
if (!ether_addr_equal(hsr_sp->MacAddressA, hsr_ethsup->ethhdr.h_source))
node->AddrB_if = dev_idx;
else
@@ -250,7 +250,7 @@ void hsr_addr_subst_source(struct hsr_priv *hsr_priv, struct sk_buff *skb)
rcu_read_lock();
node = find_node_by_AddrB(&hsr_priv->node_db, ethhdr->h_source);
if (node)
- memcpy(ethhdr->h_source, node->MacAddressA, ETH_ALEN);
+ ether_addr_copy(ethhdr->h_source, node->MacAddressA);
rcu_read_unlock();
}
@@ -272,7 +272,7 @@ void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
rcu_read_lock();
node = find_node_by_AddrA(&hsr_priv->node_db, ethhdr->h_dest);
if (node && (node->AddrB_if == dev_idx))
- memcpy(ethhdr->h_dest, node->MacAddressB, ETH_ALEN);
+ ether_addr_copy(ethhdr->h_dest, node->MacAddressB);
rcu_read_unlock();
}
@@ -428,13 +428,13 @@ void *hsr_get_next_node(struct hsr_priv *hsr_priv, void *_pos,
node = list_first_or_null_rcu(&hsr_priv->node_db,
struct node_entry, mac_list);
if (node)
- memcpy(addr, node->MacAddressA, ETH_ALEN);
+ ether_addr_copy(addr, node->MacAddressA);
return node;
}
node = _pos;
list_for_each_entry_continue_rcu(node, &hsr_priv->node_db, mac_list) {
- memcpy(addr, node->MacAddressA, ETH_ALEN);
+ ether_addr_copy(addr, node->MacAddressA);
return node;
}
@@ -462,7 +462,7 @@ int hsr_get_node_data(struct hsr_priv *hsr_priv,
return -ENOENT; /* No such entry */
}
- memcpy(addr_b, node->MacAddressB, ETH_ALEN);
+ ether_addr_copy(addr_b, node->MacAddressB);
tdiff = jiffies - node->time_in[HSR_DEV_SLAVE_A];
if (node->time_in_stale[HSR_DEV_SLAVE_A])
diff --git a/net/hsr/hsr_main.c b/net/hsr/hsr_main.c
index af68dd8..10010c5 100644
--- a/net/hsr/hsr_main.c
+++ b/net/hsr/hsr_main.c
@@ -138,8 +138,8 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
break;
if (dev == hsr_priv->slave[0])
- memcpy(hsr_priv->dev->dev_addr,
- hsr_priv->slave[0]->dev_addr, ETH_ALEN);
+ ether_addr_copy(hsr_priv->dev->dev_addr,
+ hsr_priv->slave[0]->dev_addr);
/* Make sure we recognize frames from ourselves in hsr_rcv() */
res = hsr_create_self_node(&hsr_priv->self_node_db,
diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h
index 2b835db..0dccf62 100644
--- a/net/ieee802154/6lowpan.h
+++ b/net/ieee802154/6lowpan.h
@@ -306,6 +306,119 @@ static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
*hc_ptr += len;
}
+static inline u8 lowpan_addr_mode_size(const u8 addr_mode)
+{
+ static const u8 addr_sizes[] = {
+ [LOWPAN_IPHC_ADDR_00] = 16,
+ [LOWPAN_IPHC_ADDR_01] = 8,
+ [LOWPAN_IPHC_ADDR_02] = 2,
+ [LOWPAN_IPHC_ADDR_03] = 0,
+ };
+ return addr_sizes[addr_mode];
+}
+
+static inline u8 lowpan_next_hdr_size(const u8 h_enc, u16 *uncomp_header)
+{
+ u8 ret = 1;
+
+ if ((h_enc & LOWPAN_NHC_UDP_MASK) == LOWPAN_NHC_UDP_ID) {
+ *uncomp_header += sizeof(struct udphdr);
+
+ switch (h_enc & LOWPAN_NHC_UDP_CS_P_11) {
+ case LOWPAN_NHC_UDP_CS_P_00:
+ ret += 4;
+ break;
+ case LOWPAN_NHC_UDP_CS_P_01:
+ case LOWPAN_NHC_UDP_CS_P_10:
+ ret += 3;
+ break;
+ case LOWPAN_NHC_UDP_CS_P_11:
+ ret++;
+ break;
+ default:
+ break;
+ }
+
+ if (!(h_enc & LOWPAN_NHC_UDP_CS_C))
+ ret += 2;
+ }
+
+ return ret;
+}
+
+/**
+ * lowpan_uncompress_size - return skb->len as if the header were uncompressed
+ * @skb: sk_buff with 6lowpan header inside
+ * @dgram_offset: optional output for the uncompressed header size
+ *
+ * Returns skb->len adjusted for the uncompressed header
+ */
+static inline u16
+lowpan_uncompress_size(const struct sk_buff *skb, u16 *dgram_offset)
+{
+ u16 ret = 2, uncomp_header = sizeof(struct ipv6hdr);
+ u8 iphc0, iphc1, h_enc;
+
+ iphc0 = skb_network_header(skb)[0];
+ iphc1 = skb_network_header(skb)[1];
+
+ switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
+ case 0:
+ ret += 4;
+ break;
+ case 1:
+ ret += 3;
+ break;
+ case 2:
+ ret++;
+ break;
+ default:
+ break;
+ }
+
+ if (!(iphc0 & LOWPAN_IPHC_NH_C))
+ ret++;
+
+ if (!(iphc0 & 0x03))
+ ret++;
+
+ ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_SAM) >>
+ LOWPAN_IPHC_SAM_BIT);
+
+ if (iphc1 & LOWPAN_IPHC_M) {
+ switch ((iphc1 & LOWPAN_IPHC_DAM_11) >>
+ LOWPAN_IPHC_DAM_BIT) {
+ case LOWPAN_IPHC_DAM_00:
+ ret += 16;
+ break;
+ case LOWPAN_IPHC_DAM_01:
+ ret += 6;
+ break;
+ case LOWPAN_IPHC_DAM_10:
+ ret += 4;
+ break;
+ case LOWPAN_IPHC_DAM_11:
+ ret++;
+ break;
+ default:
+ break;
+ }
+ } else {
+ ret += lowpan_addr_mode_size((iphc1 & LOWPAN_IPHC_DAM_11) >>
+ LOWPAN_IPHC_DAM_BIT);
+ }
+
+ if (iphc0 & LOWPAN_IPHC_NH_C) {
+ h_enc = skb_network_header(skb)[ret];
+ ret += lowpan_next_hdr_size(h_enc, &uncomp_header);
+ }
+
+ if (dgram_offset)
+ *dgram_offset = uncomp_header;
+
+ return skb->len + uncomp_header - ret;
+}
+
typedef int (*skb_delivery_cb)(struct sk_buff *skb, struct net_device *dev);
int lowpan_process_data(struct sk_buff *skb, struct net_device *dev,
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan_rtnl.c
index 8edfea5..e472618 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan_rtnl.c
@@ -1,10 +1,8 @@
-/*
- * Copyright 2011, Siemens AG
+/* Copyright 2011, Siemens AG
* written by Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
*/
-/*
- * Based on patches from Jon Smirl <jonsmirl@gmail.com>
+/* Based on patches from Jon Smirl <jonsmirl@gmail.com>
* Copyright (c) 2011 Jon Smirl <jonsmirl@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
@@ -15,10 +13,6 @@
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
/* Jon's code is based on 6lowpan implementation for Contiki which is:
@@ -60,6 +54,7 @@
#include <net/ieee802154_netdev.h>
#include <net/ipv6.h>
+#include "reassembly.h"
#include "6lowpan.h"
static LIST_HEAD(lowpan_devices);
@@ -68,7 +63,7 @@ static LIST_HEAD(lowpan_devices);
struct lowpan_dev_info {
struct net_device *real_dev; /* real WPAN device ptr */
struct mutex dev_list_mtx; /* mutex for list ops */
- unsigned short fragment_tag;
+ __be16 fragment_tag;
};
struct lowpan_dev_record {
@@ -76,18 +71,6 @@ struct lowpan_dev_record {
struct list_head list;
};
-struct lowpan_fragment {
- struct sk_buff *skb; /* skb to be assembled */
- u16 length; /* length to be assemled */
- u32 bytes_rcv; /* bytes received */
- u16 tag; /* current fragment tag */
- struct timer_list timer; /* assembling timer */
- struct list_head list; /* fragments list */
-};
-
-static LIST_HEAD(lowpan_fragments);
-static DEFINE_SPINLOCK(flist_lock);
-
static inline struct
lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
{
@@ -124,13 +107,11 @@ static int lowpan_header_create(struct sk_buff *skb,
lowpan_header_compress(skb, dev, type, daddr, saddr, len);
- /*
- * NOTE1: I'm still unsure about the fact that compression and WPAN
+ /* NOTE1: I'm still unsure about the fact that compression and WPAN
* header are created here and not later in the xmit. So wait for
* an opinion of net maintainers.
*/
- /*
- * NOTE2: to be absolutely correct, we must derive PANid information
+ /* NOTE2: to be absolutely correct, we must derive PANid information
* from MAC subif of the 'dev' and 'real_dev' network devices, but
* this isn't implemented in mainline yet, so currently we assign 0xff
*/
@@ -145,8 +126,7 @@ static int lowpan_header_create(struct sk_buff *skb,
/* intra-PAN communications */
da.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev);
- /*
- * if the destination address is the broadcast address, use the
+ /* if the destination address is the broadcast address, use the
* corresponding short address
*/
if (lowpan_is_addr_broadcast(daddr)) {
@@ -188,69 +168,6 @@ static int lowpan_give_skb_to_devices(struct sk_buff *skb,
return stat;
}
-static void lowpan_fragment_timer_expired(unsigned long entry_addr)
-{
- struct lowpan_fragment *entry = (struct lowpan_fragment *)entry_addr;
-
- pr_debug("timer expired for frame with tag %d\n", entry->tag);
-
- list_del(&entry->list);
- dev_kfree_skb(entry->skb);
- kfree(entry);
-}
-
-static struct lowpan_fragment *
-lowpan_alloc_new_frame(struct sk_buff *skb, u16 len, u16 tag)
-{
- struct lowpan_fragment *frame;
-
- frame = kzalloc(sizeof(struct lowpan_fragment),
- GFP_ATOMIC);
- if (!frame)
- goto frame_err;
-
- INIT_LIST_HEAD(&frame->list);
-
- frame->length = len;
- frame->tag = tag;
-
- /* allocate buffer for frame assembling */
- frame->skb = netdev_alloc_skb_ip_align(skb->dev, frame->length +
- sizeof(struct ipv6hdr));
-
- if (!frame->skb)
- goto skb_err;
-
- frame->skb->priority = skb->priority;
-
- /* reserve headroom for uncompressed ipv6 header */
- skb_reserve(frame->skb, sizeof(struct ipv6hdr));
- skb_put(frame->skb, frame->length);
-
- /* copy the first control block to keep a
- * trace of the link-layer addresses in case
- * of a link-local compressed address
- */
- memcpy(frame->skb->cb, skb->cb, sizeof(skb->cb));
-
- init_timer(&frame->timer);
- /* time out is the same as for ipv6 - 60 sec */
- frame->timer.expires = jiffies + LOWPAN_FRAG_TIMEOUT;
- frame->timer.data = (unsigned long)frame;
- frame->timer.function = lowpan_fragment_timer_expired;
-
- add_timer(&frame->timer);
-
- list_add_tail(&frame->list, &lowpan_fragments);
-
- return frame;
-
-skb_err:
- kfree(frame);
-frame_err:
- return NULL;
-}
-
static int process_data(struct sk_buff *skb)
{
u8 iphc0, iphc1;
@@ -264,94 +181,6 @@ static int process_data(struct sk_buff *skb)
if (lowpan_fetch_skb_u8(skb, &iphc0))
goto drop;
- /* fragments assembling */
- switch (iphc0 & LOWPAN_DISPATCH_MASK) {
- case LOWPAN_DISPATCH_FRAG1:
- case LOWPAN_DISPATCH_FRAGN:
- {
- struct lowpan_fragment *frame;
- /* slen stores the rightmost 8 bits of the 11 bits length */
- u8 slen, offset = 0;
- u16 len, tag;
- bool found = false;
-
- if (lowpan_fetch_skb_u8(skb, &slen) || /* frame length */
- lowpan_fetch_skb_u16(skb, &tag)) /* fragment tag */
- goto drop;
-
- /* adds the 3 MSB to the 8 LSB to retrieve the 11 bits length */
- len = ((iphc0 & 7) << 8) | slen;
-
- if ((iphc0 & LOWPAN_DISPATCH_MASK) == LOWPAN_DISPATCH_FRAG1) {
- pr_debug("%s received a FRAG1 packet (tag: %d, "
- "size of the entire IP packet: %d)",
- __func__, tag, len);
- } else { /* FRAGN */
- if (lowpan_fetch_skb_u8(skb, &offset))
- goto unlock_and_drop;
- pr_debug("%s received a FRAGN packet (tag: %d, "
- "size of the entire IP packet: %d, "
- "offset: %d)", __func__, tag, len, offset * 8);
- }
-
- /*
- * check if frame assembling with the same tag is
- * already in progress
- */
- spin_lock_bh(&flist_lock);
-
- list_for_each_entry(frame, &lowpan_fragments, list)
- if (frame->tag == tag) {
- found = true;
- break;
- }
-
- /* alloc new frame structure */
- if (!found) {
- pr_debug("%s first fragment received for tag %d, "
- "begin packet reassembly", __func__, tag);
- frame = lowpan_alloc_new_frame(skb, len, tag);
- if (!frame)
- goto unlock_and_drop;
- }
-
- /* if payload fits buffer, copy it */
- if (likely((offset * 8 + skb->len) <= frame->length))
- skb_copy_to_linear_data_offset(frame->skb, offset * 8,
- skb->data, skb->len);
- else
- goto unlock_and_drop;
-
- frame->bytes_rcv += skb->len;
-
- /* frame assembling complete */
- if ((frame->bytes_rcv == frame->length) &&
- frame->timer.expires > jiffies) {
- /* if timer haven't expired - first of all delete it */
- del_timer_sync(&frame->timer);
- list_del(&frame->list);
- spin_unlock_bh(&flist_lock);
-
- pr_debug("%s successfully reassembled fragment "
- "(tag %d)", __func__, tag);
-
- dev_kfree_skb(skb);
- skb = frame->skb;
- kfree(frame);
-
- if (lowpan_fetch_skb_u8(skb, &iphc0))
- goto drop;
-
- break;
- }
- spin_unlock_bh(&flist_lock);
-
- return kfree_skb(skb), 0;
- }
- default:
- break;
- }
-
if (lowpan_fetch_skb_u8(skb, &iphc1))
goto drop;
@@ -364,8 +193,6 @@ static int process_data(struct sk_buff *skb)
IEEE802154_ADDR_LEN, iphc0, iphc1,
lowpan_give_skb_to_devices);
-unlock_and_drop:
- spin_unlock_bh(&flist_lock);
drop:
kfree_skb(skb);
return -EINVAL;
@@ -386,7 +213,7 @@ static int lowpan_set_address(struct net_device *dev, void *p)
static int
lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
- int mlen, int plen, int offset, int type)
+ int mlen, int plen, int offset, int type)
{
struct sk_buff *frag;
int hlen;
@@ -422,51 +249,68 @@ lowpan_fragment_xmit(struct sk_buff *skb, u8 *head,
static int
lowpan_skb_fragmentation(struct sk_buff *skb, struct net_device *dev)
{
- int err, header_length, payload_length, tag, offset = 0;
+ int err;
+ u16 dgram_offset, dgram_size, payload_length, header_length,
+ lowpan_size, frag_plen, offset;
+ __be16 tag;
u8 head[5];
header_length = skb->mac_len;
payload_length = skb->len - header_length;
tag = lowpan_dev_info(dev)->fragment_tag++;
+ lowpan_size = skb_network_header_len(skb);
+ dgram_size = lowpan_uncompress_size(skb, &dgram_offset) -
+ header_length;
/* first fragment header */
- head[0] = LOWPAN_DISPATCH_FRAG1 | ((payload_length >> 8) & 0x7);
- head[1] = payload_length & 0xff;
- head[2] = tag >> 8;
- head[3] = tag & 0xff;
+ head[0] = LOWPAN_DISPATCH_FRAG1 | ((dgram_size >> 8) & 0x7);
+ head[1] = dgram_size & 0xff;
+ memcpy(head + 2, &tag, sizeof(tag));
- err = lowpan_fragment_xmit(skb, head, header_length, LOWPAN_FRAG_SIZE,
- 0, LOWPAN_DISPATCH_FRAG1);
+	/* Calculate the nearest payload length (a multiple of 8) for the
+	 * first fragment that fits into an IEEE802154_MTU
+	 */
+ frag_plen = round_down(IEEE802154_MTU - header_length -
+ LOWPAN_FRAG1_HEAD_SIZE - lowpan_size -
+ IEEE802154_MFR_SIZE, 8);
+ err = lowpan_fragment_xmit(skb, head, header_length,
+ frag_plen + lowpan_size, 0,
+ LOWPAN_DISPATCH_FRAG1);
if (err) {
pr_debug("%s unable to send FRAG1 packet (tag: %d)",
__func__, tag);
goto exit;
}
- offset = LOWPAN_FRAG_SIZE;
+ offset = lowpan_size + frag_plen;
+ dgram_offset += frag_plen;
/* next fragment header */
head[0] &= ~LOWPAN_DISPATCH_FRAG1;
head[0] |= LOWPAN_DISPATCH_FRAGN;
+ frag_plen = round_down(IEEE802154_MTU - header_length -
+ LOWPAN_FRAGN_HEAD_SIZE - IEEE802154_MFR_SIZE, 8);
+
while (payload_length - offset > 0) {
- int len = LOWPAN_FRAG_SIZE;
+ int len = frag_plen;
- head[4] = offset / 8;
+ head[4] = dgram_offset >> 3;
if (payload_length - offset < len)
len = payload_length - offset;
- err = lowpan_fragment_xmit(skb, head, header_length,
- len, offset, LOWPAN_DISPATCH_FRAGN);
+ err = lowpan_fragment_xmit(skb, head, header_length, len,
+ offset, LOWPAN_DISPATCH_FRAGN);
if (err) {
- pr_debug("%s unable to send a subsequent FRAGN packet "
- "(tag: %d, offset: %d", __func__, tag, offset);
+ pr_debug("%s unable to send a FRAGN packet. (tag: %d, offset: %d)\n",
+ __func__, tag, offset);
goto exit;
}
offset += len;
+ dgram_offset += len;
}
exit:
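
A worked example of the per-fragment payload sizing introduced above. It uses the kernel constants IEEE802154_MTU (127), IEEE802154_MFR_SIZE (2, the FCS), LOWPAN_FRAG1_HEAD_SIZE (4) and LOWPAN_FRAGN_HEAD_SIZE (5); the MAC-header and compressed-header lengths are made-up illustration values, and round_down() is simplified to plain modulo arithmetic.

/* Standalone illustration of the frag_plen arithmetic above. The header
 * lengths (21-byte MAC header, 7-byte compressed 6LoWPAN header) are
 * hypothetical example values, not taken from this patch.
 */
#include <stdio.h>

#define IEEE802154_MTU		127
#define IEEE802154_MFR_SIZE	2	/* FCS */
#define LOWPAN_FRAG1_HEAD_SIZE	4
#define LOWPAN_FRAGN_HEAD_SIZE	5
#define round_down(x, y)	((x) - ((x) % (y)))

int main(void)
{
	unsigned int header_length = 21;	/* example MAC header size */
	unsigned int lowpan_size = 7;		/* example IPHC header size */
	unsigned int frag1, fragn;

	frag1 = round_down(IEEE802154_MTU - header_length -
			   LOWPAN_FRAG1_HEAD_SIZE - lowpan_size -
			   IEEE802154_MFR_SIZE, 8);
	fragn = round_down(IEEE802154_MTU - header_length -
			   LOWPAN_FRAGN_HEAD_SIZE - IEEE802154_MFR_SIZE, 8);

	/* Prints 88 and 96 for these example header sizes. */
	printf("FRAG1 payload: %u, FRAGN payload: %u\n", frag1, fragn);
	return 0;
}
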
@@ -594,44 +438,53 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev)
{
struct sk_buff *local_skb;
+ int ret;
if (!netif_running(dev))
- goto drop;
+ goto drop_skb;
if (dev->type != ARPHRD_IEEE802154)
- goto drop;
+ goto drop_skb;
+
+ local_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!local_skb)
+ goto drop_skb;
+
+ kfree_skb(skb);
/* check that it's our buffer */
if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
- /* Copy the packet so that the IPv6 header is
- * properly aligned.
- */
- local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1,
- skb_tailroom(skb), GFP_ATOMIC);
- if (!local_skb)
- goto drop;
-
local_skb->protocol = htons(ETH_P_IPV6);
local_skb->pkt_type = PACKET_HOST;
/* Pull off the 1-byte of 6lowpan header. */
skb_pull(local_skb, 1);
- lowpan_give_skb_to_devices(local_skb, NULL);
-
- kfree_skb(local_skb);
- kfree_skb(skb);
+ ret = lowpan_give_skb_to_devices(local_skb, NULL);
+ if (ret == NET_RX_DROP)
+ goto drop;
} else {
switch (skb->data[0] & 0xe0) {
case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
+ ret = process_data(local_skb);
+ if (ret == NET_RX_DROP)
+ goto drop;
+ break;
case LOWPAN_DISPATCH_FRAG1: /* first fragment header */
+ ret = lowpan_frag_rcv(local_skb, LOWPAN_DISPATCH_FRAG1);
+ if (ret == 1) {
+ ret = process_data(local_skb);
+ if (ret == NET_RX_DROP)
+ goto drop;
+ }
+ break;
case LOWPAN_DISPATCH_FRAGN: /* next fragments headers */
- local_skb = skb_clone(skb, GFP_ATOMIC);
- if (!local_skb)
- goto drop;
- process_data(local_skb);
-
- kfree_skb(skb);
+ ret = lowpan_frag_rcv(local_skb, LOWPAN_DISPATCH_FRAGN);
+ if (ret == 1) {
+ ret = process_data(local_skb);
+ if (ret == NET_RX_DROP)
+ goto drop;
+ }
break;
default:
break;
@@ -639,9 +492,9 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
}
return NET_RX_SUCCESS;
-
-drop:
+drop_skb:
kfree_skb(skb);
+drop:
return NET_RX_DROP;
}
@@ -665,10 +518,9 @@ static int lowpan_newlink(struct net *src_net, struct net_device *dev,
}
lowpan_dev_info(dev)->real_dev = real_dev;
- lowpan_dev_info(dev)->fragment_tag = 0;
mutex_init(&lowpan_dev_info(dev)->dev_list_mtx);
- entry = kzalloc(sizeof(struct lowpan_dev_record), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
dev_put(real_dev);
lowpan_dev_info(dev)->real_dev = NULL;
@@ -769,43 +621,40 @@ static int __init lowpan_init_module(void)
{
int err = 0;
- err = lowpan_netlink_init();
+ err = lowpan_net_frag_init();
if (err < 0)
goto out;
+ err = lowpan_netlink_init();
+ if (err < 0)
+ goto out_frag;
+
dev_add_pack(&lowpan_packet_type);
err = register_netdevice_notifier(&lowpan_dev_notifier);
- if (err < 0) {
- dev_remove_pack(&lowpan_packet_type);
- lowpan_netlink_fini();
- }
+ if (err < 0)
+ goto out_pack;
+
+ return 0;
+
+out_pack:
+ dev_remove_pack(&lowpan_packet_type);
+ lowpan_netlink_fini();
+out_frag:
+ lowpan_net_frag_exit();
out:
return err;
}
static void __exit lowpan_cleanup_module(void)
{
- struct lowpan_fragment *frame, *tframe;
-
lowpan_netlink_fini();
dev_remove_pack(&lowpan_packet_type);
- unregister_netdevice_notifier(&lowpan_dev_notifier);
+ lowpan_net_frag_exit();
- /* Now 6lowpan packet_type is removed, so no new fragments are
- * expected on RX, therefore that's the time to clean incomplete
- * fragments.
- */
- spin_lock_bh(&flist_lock);
- list_for_each_entry_safe(frame, tframe, &lowpan_fragments, list) {
- del_timer_sync(&frame->timer);
- list_del(&frame->list);
- dev_kfree_skb(frame->skb);
- kfree(frame);
- }
- spin_unlock_bh(&flist_lock);
+ unregister_netdevice_notifier(&lowpan_dev_notifier);
}
module_init(lowpan_init_module);
diff --git a/net/ieee802154/Kconfig b/net/ieee802154/Kconfig
index 9c9879d..8af1330 100644
--- a/net/ieee802154/Kconfig
+++ b/net/ieee802154/Kconfig
@@ -15,7 +15,7 @@ config IEEE802154_6LOWPAN
depends on IEEE802154 && IPV6
select 6LOWPAN_IPHC
---help---
- IPv6 compression over IEEE 802.15.4.
+ IPv6 compression over IEEE 802.15.4.
config 6LOWPAN_IPHC
tristate
diff --git a/net/ieee802154/Makefile b/net/ieee802154/Makefile
index e8f0588..b113fc4 100644
--- a/net/ieee802154/Makefile
+++ b/net/ieee802154/Makefile
@@ -2,5 +2,6 @@ obj-$(CONFIG_IEEE802154) += ieee802154.o af_802154.o
obj-$(CONFIG_IEEE802154_6LOWPAN) += 6lowpan.o
obj-$(CONFIG_6LOWPAN_IPHC) += 6lowpan_iphc.o
+6lowpan-y := 6lowpan_rtnl.o reassembly.o
ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o wpan-class.o
af_802154-y := af_ieee802154.o raw.o dgram.o
diff --git a/net/ieee802154/ieee802154.h b/net/ieee802154/ieee802154.h
index cee4425..6cbc896 100644
--- a/net/ieee802154/ieee802154.h
+++ b/net/ieee802154/ieee802154.h
@@ -53,6 +53,7 @@ int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info);
int ieee802154_dump_phy(struct sk_buff *skb, struct netlink_callback *cb);
int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info);
int ieee802154_del_iface(struct sk_buff *skb, struct genl_info *info);
+int ieee802154_set_phyparams(struct sk_buff *skb, struct genl_info *info);
enum ieee802154_mcgrp_ids {
IEEE802154_COORD_MCGRP,
diff --git a/net/ieee802154/netlink.c b/net/ieee802154/netlink.c
index 43f1b2b..67c151b 100644
--- a/net/ieee802154/netlink.c
+++ b/net/ieee802154/netlink.c
@@ -115,6 +115,7 @@ static const struct genl_ops ieee8021154_ops[] = {
ieee802154_dump_phy),
IEEE802154_OP(IEEE802154_ADD_IFACE, ieee802154_add_iface),
IEEE802154_OP(IEEE802154_DEL_IFACE, ieee802154_del_iface),
+ IEEE802154_OP(IEEE802154_SET_PHYPARAMS, ieee802154_set_phyparams),
/* see nl-mac.c */
IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req),
IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp),
diff --git a/net/ieee802154/nl-phy.c b/net/ieee802154/nl-phy.c
index 89b265a..222310a 100644
--- a/net/ieee802154/nl-phy.c
+++ b/net/ieee802154/nl-phy.c
@@ -55,7 +55,15 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
mutex_lock(&phy->pib_lock);
if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
nla_put_u8(msg, IEEE802154_ATTR_PAGE, phy->current_page) ||
- nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel))
+ nla_put_u8(msg, IEEE802154_ATTR_CHANNEL, phy->current_channel) ||
+ nla_put_s8(msg, IEEE802154_ATTR_TXPOWER, phy->transmit_power) ||
+ nla_put_u8(msg, IEEE802154_ATTR_LBT_ENABLED, phy->lbt) ||
+ nla_put_u8(msg, IEEE802154_ATTR_CCA_MODE, phy->cca_mode) ||
+ nla_put_s32(msg, IEEE802154_ATTR_CCA_ED_LEVEL, phy->cca_ed_level) ||
+ nla_put_u8(msg, IEEE802154_ATTR_CSMA_RETRIES, phy->csma_retries) ||
+ nla_put_u8(msg, IEEE802154_ATTR_CSMA_MIN_BE, phy->min_be) ||
+ nla_put_u8(msg, IEEE802154_ATTR_CSMA_MAX_BE, phy->max_be) ||
+ nla_put_s8(msg, IEEE802154_ATTR_FRAME_RETRIES, phy->frame_retries))
goto nla_put_failure;
for (i = 0; i < 32; i++) {
if (phy->channels_supported[i])
@@ -354,3 +362,193 @@ out_dev:
return rc;
}
+
+static int phy_set_txpower(struct wpan_phy *phy, struct genl_info *info)
+{
+ int txpower = nla_get_s8(info->attrs[IEEE802154_ATTR_TXPOWER]);
+ int rc;
+
+ rc = phy->set_txpower(phy, txpower);
+ if (rc < 0)
+ return rc;
+
+ phy->transmit_power = txpower;
+
+ return 0;
+}
+
+static int phy_set_lbt(struct wpan_phy *phy, struct genl_info *info)
+{
+ u8 on = !!nla_get_u8(info->attrs[IEEE802154_ATTR_LBT_ENABLED]);
+ int rc;
+
+ rc = phy->set_lbt(phy, on);
+ if (rc < 0)
+ return rc;
+
+ phy->lbt = on;
+
+ return 0;
+}
+
+static int phy_set_cca_mode(struct wpan_phy *phy, struct genl_info *info)
+{
+ u8 mode = nla_get_u8(info->attrs[IEEE802154_ATTR_CCA_MODE]);
+ int rc;
+
+ if (mode > 3)
+ return -EINVAL;
+
+ rc = phy->set_cca_mode(phy, mode);
+ if (rc < 0)
+ return rc;
+
+ phy->cca_mode = mode;
+
+ return 0;
+}
+
+static int phy_set_cca_ed_level(struct wpan_phy *phy, struct genl_info *info)
+{
+ s32 level = nla_get_s32(info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]);
+ int rc;
+
+ rc = phy->set_cca_ed_level(phy, level);
+ if (rc < 0)
+ return rc;
+
+ phy->cca_ed_level = level;
+
+ return 0;
+}
+
+static int phy_set_csma_params(struct wpan_phy *phy, struct genl_info *info)
+{
+ int rc;
+ u8 min_be = phy->min_be;
+ u8 max_be = phy->max_be;
+ u8 retries = phy->csma_retries;
+
+ if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES])
+ retries = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_RETRIES]);
+ if (info->attrs[IEEE802154_ATTR_CSMA_MIN_BE])
+ min_be = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_MIN_BE]);
+ if (info->attrs[IEEE802154_ATTR_CSMA_MAX_BE])
+ max_be = nla_get_u8(info->attrs[IEEE802154_ATTR_CSMA_MAX_BE]);
+
+ if (retries > 5 || max_be < 3 || max_be > 8 || min_be > max_be)
+ return -EINVAL;
+
+ rc = phy->set_csma_params(phy, min_be, max_be, retries);
+ if (rc < 0)
+ return rc;
+
+ phy->min_be = min_be;
+ phy->max_be = max_be;
+ phy->csma_retries = retries;
+
+ return 0;
+}
+
+static int phy_set_frame_retries(struct wpan_phy *phy, struct genl_info *info)
+{
+ s8 retries = nla_get_s8(info->attrs[IEEE802154_ATTR_FRAME_RETRIES]);
+ int rc;
+
+ if (retries < -1 || retries > 7)
+ return -EINVAL;
+
+ rc = phy->set_frame_retries(phy, retries);
+ if (rc < 0)
+ return rc;
+
+ phy->frame_retries = retries;
+
+ return 0;
+}
+
+int ieee802154_set_phyparams(struct sk_buff *skb, struct genl_info *info)
+{
+ struct wpan_phy *phy;
+ const char *name;
+ int rc = -ENOTSUPP;
+
+ pr_debug("%s\n", __func__);
+
+ if (!info->attrs[IEEE802154_ATTR_PHY_NAME] &&
+ !info->attrs[IEEE802154_ATTR_LBT_ENABLED] &&
+ !info->attrs[IEEE802154_ATTR_CCA_MODE] &&
+ !info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL] &&
+ !info->attrs[IEEE802154_ATTR_CSMA_RETRIES] &&
+ !info->attrs[IEEE802154_ATTR_CSMA_MIN_BE] &&
+ !info->attrs[IEEE802154_ATTR_CSMA_MAX_BE] &&
+ !info->attrs[IEEE802154_ATTR_FRAME_RETRIES])
+ return -EINVAL;
+
+ name = nla_data(info->attrs[IEEE802154_ATTR_PHY_NAME]);
+ if (name[nla_len(info->attrs[IEEE802154_ATTR_PHY_NAME]) - 1] != '\0')
+ return -EINVAL; /* phy name should be null-terminated */
+
+ phy = wpan_phy_find(name);
+ if (!phy)
+ return -ENODEV;
+
+ if ((!phy->set_txpower && info->attrs[IEEE802154_ATTR_TXPOWER]) ||
+ (!phy->set_lbt && info->attrs[IEEE802154_ATTR_LBT_ENABLED]) ||
+ (!phy->set_cca_mode && info->attrs[IEEE802154_ATTR_CCA_MODE]) ||
+ (!phy->set_cca_ed_level &&
+ info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]))
+ goto out;
+
+ mutex_lock(&phy->pib_lock);
+
+ if (info->attrs[IEEE802154_ATTR_TXPOWER]) {
+ rc = phy_set_txpower(phy, info);
+ if (rc < 0)
+ goto error;
+ }
+
+ if (info->attrs[IEEE802154_ATTR_LBT_ENABLED]) {
+ rc = phy_set_lbt(phy, info);
+ if (rc < 0)
+ goto error;
+ }
+
+ if (info->attrs[IEEE802154_ATTR_CCA_MODE]) {
+ rc = phy_set_cca_mode(phy, info);
+ if (rc < 0)
+ goto error;
+ }
+
+ if (info->attrs[IEEE802154_ATTR_CCA_ED_LEVEL]) {
+ rc = phy_set_cca_ed_level(phy, info);
+ if (rc < 0)
+ goto error;
+ }
+
+ if (info->attrs[IEEE802154_ATTR_CSMA_RETRIES] ||
+ info->attrs[IEEE802154_ATTR_CSMA_MIN_BE] ||
+ info->attrs[IEEE802154_ATTR_CSMA_MAX_BE]) {
+ rc = phy_set_csma_params(phy, info);
+ if (rc < 0)
+ goto error;
+ }
+
+ if (info->attrs[IEEE802154_ATTR_FRAME_RETRIES]) {
+ rc = phy_set_frame_retries(phy, info);
+ if (rc < 0)
+ goto error;
+ }
+
+ mutex_unlock(&phy->pib_lock);
+
+ wpan_phy_put(phy);
+
+ return 0;
+
+error:
+ mutex_unlock(&phy->pib_lock);
+out:
+ wpan_phy_put(phy);
+ return rc;
+}
diff --git a/net/ieee802154/nl_policy.c b/net/ieee802154/nl_policy.c
index 6adda4d..fd7be5e 100644
--- a/net/ieee802154/nl_policy.c
+++ b/net/ieee802154/nl_policy.c
@@ -52,5 +52,15 @@ const struct nla_policy ieee802154_policy[IEEE802154_ATTR_MAX + 1] = {
[IEEE802154_ATTR_DURATION] = { .type = NLA_U8, },
[IEEE802154_ATTR_ED_LIST] = { .len = 27 },
[IEEE802154_ATTR_CHANNEL_PAGE_LIST] = { .len = 32 * 4, },
+
+ [IEEE802154_ATTR_TXPOWER] = { .type = NLA_S8, },
+ [IEEE802154_ATTR_LBT_ENABLED] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_CCA_MODE] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_CCA_ED_LEVEL] = { .type = NLA_S32, },
+ [IEEE802154_ATTR_CSMA_RETRIES] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_CSMA_MIN_BE] = { .type = NLA_U8, },
+ [IEEE802154_ATTR_CSMA_MAX_BE] = { .type = NLA_U8, },
+
+ [IEEE802154_ATTR_FRAME_RETRIES] = { .type = NLA_S8, },
};
diff --git a/net/ieee802154/reassembly.c b/net/ieee802154/reassembly.c
new file mode 100644
index 0000000..4511fc2
--- /dev/null
+++ b/net/ieee802154/reassembly.c
@@ -0,0 +1,564 @@
+/* 6LoWPAN fragment reassembly
+ *
+ *
+ * Authors:
+ * Alexander Aring <aar@pengutronix.de>
+ *
+ * Based on: net/ipv6/reassembly.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) "6LoWPAN: " fmt
+
+#include <linux/net.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+#include <linux/skbuff.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <net/ieee802154_netdev.h>
+#include <net/ipv6.h>
+#include <net/inet_frag.h>
+
+#include "6lowpan.h"
+#include "reassembly.h"
+
+static struct inet_frags lowpan_frags;
+
+static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
+ struct sk_buff *prev, struct net_device *dev);
+
+static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size,
+ const struct ieee802154_addr *saddr,
+ const struct ieee802154_addr *daddr)
+{
+ u32 c;
+
+ net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
+ c = jhash_3words(ieee802154_addr_hash(saddr),
+ ieee802154_addr_hash(daddr),
+ (__force u32)(tag + (d_size << 16)),
+ lowpan_frags.rnd);
+
+ return c & (INETFRAGS_HASHSZ - 1);
+}
+
+static unsigned int lowpan_hashfn(struct inet_frag_queue *q)
+{
+ struct lowpan_frag_queue *fq;
+
+ fq = container_of(q, struct lowpan_frag_queue, q);
+ return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
+}
+
+bool lowpan_frag_match(struct inet_frag_queue *q, void *a)
+{
+ struct lowpan_frag_queue *fq;
+ struct lowpan_create_arg *arg = a;
+
+ fq = container_of(q, struct lowpan_frag_queue, q);
+ return fq->tag == arg->tag && fq->d_size == arg->d_size &&
+ ieee802154_addr_addr_equal(&fq->saddr, arg->src) &&
+ ieee802154_addr_addr_equal(&fq->daddr, arg->dst);
+}
+EXPORT_SYMBOL(lowpan_frag_match);
+
+void lowpan_frag_init(struct inet_frag_queue *q, void *a)
+{
+ struct lowpan_frag_queue *fq;
+ struct lowpan_create_arg *arg = a;
+
+ fq = container_of(q, struct lowpan_frag_queue, q);
+
+ fq->tag = arg->tag;
+ fq->d_size = arg->d_size;
+ fq->saddr = *arg->src;
+ fq->daddr = *arg->dst;
+}
+EXPORT_SYMBOL(lowpan_frag_init);
+
+void lowpan_expire_frag_queue(struct frag_queue *fq, struct inet_frags *frags)
+{
+ spin_lock(&fq->q.lock);
+
+ if (fq->q.last_in & INET_FRAG_COMPLETE)
+ goto out;
+
+ inet_frag_kill(&fq->q, frags);
+out:
+ spin_unlock(&fq->q.lock);
+ inet_frag_put(&fq->q, frags);
+}
+EXPORT_SYMBOL(lowpan_expire_frag_queue);
+
+static void lowpan_frag_expire(unsigned long data)
+{
+ struct frag_queue *fq;
+ struct net *net;
+
+ fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
+ net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags);
+
+ lowpan_expire_frag_queue(fq, &lowpan_frags);
+}
+
+static inline struct lowpan_frag_queue *
+fq_find(struct net *net, const struct ieee802154_frag_info *frag_info,
+ const struct ieee802154_addr *src, const struct ieee802154_addr *dst)
+{
+ struct inet_frag_queue *q;
+ struct lowpan_create_arg arg;
+ unsigned int hash;
+
+ arg.tag = frag_info->d_tag;
+ arg.d_size = frag_info->d_size;
+ arg.src = src;
+ arg.dst = dst;
+
+ read_lock(&lowpan_frags.lock);
+ hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);
+
+ q = inet_frag_find(&net->ieee802154_lowpan.frags,
+ &lowpan_frags, &arg, hash);
+ if (IS_ERR_OR_NULL(q)) {
+ inet_frag_maybe_warn_overflow(q, pr_fmt());
+ return NULL;
+ }
+ return container_of(q, struct lowpan_frag_queue, q);
+}
+
+static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
+ struct sk_buff *skb, const u8 frag_type)
+{
+ struct sk_buff *prev, *next;
+ struct net_device *dev;
+ int end, offset;
+
+ if (fq->q.last_in & INET_FRAG_COMPLETE)
+ goto err;
+
+ offset = mac_cb(skb)->frag_info.d_offset << 3;
+ end = mac_cb(skb)->frag_info.d_size;
+
+ /* Is this the final fragment? */
+ if (offset + skb->len == end) {
+ /* If we already have some bits beyond end
+ * or have different end, the segment is corrupted.
+ */
+ if (end < fq->q.len ||
+ ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
+ goto err;
+ fq->q.last_in |= INET_FRAG_LAST_IN;
+ fq->q.len = end;
+ } else {
+ if (end > fq->q.len) {
+ /* Some bits beyond end -> corruption. */
+ if (fq->q.last_in & INET_FRAG_LAST_IN)
+ goto err;
+ fq->q.len = end;
+ }
+ }
+
+ /* Find out which fragments are in front and at the back of us
+ * in the chain of fragments so far. We must know where to put
+ * this fragment, right?
+ */
+ prev = fq->q.fragments_tail;
+ if (!prev || mac_cb(prev)->frag_info.d_offset <
+ mac_cb(skb)->frag_info.d_offset) {
+ next = NULL;
+ goto found;
+ }
+ prev = NULL;
+ for (next = fq->q.fragments; next != NULL; next = next->next) {
+ if (mac_cb(next)->frag_info.d_offset >=
+ mac_cb(skb)->frag_info.d_offset)
+ break; /* bingo! */
+ prev = next;
+ }
+
+found:
+ /* Insert this fragment in the chain of fragments. */
+ skb->next = next;
+ if (!next)
+ fq->q.fragments_tail = skb;
+ if (prev)
+ prev->next = skb;
+ else
+ fq->q.fragments = skb;
+
+ dev = skb->dev;
+ if (dev)
+ skb->dev = NULL;
+
+ fq->q.stamp = skb->tstamp;
+ if (frag_type == LOWPAN_DISPATCH_FRAG1) {
+ /* Calculate uncomp. 6lowpan header to estimate full size */
+ fq->q.meat += lowpan_uncompress_size(skb, NULL);
+ fq->q.last_in |= INET_FRAG_FIRST_IN;
+ } else {
+ fq->q.meat += skb->len;
+ }
+ add_frag_mem_limit(&fq->q, skb->truesize);
+
+ if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+ fq->q.meat == fq->q.len) {
+ int res;
+ unsigned long orefdst = skb->_skb_refdst;
+
+ skb->_skb_refdst = 0UL;
+ res = lowpan_frag_reasm(fq, prev, dev);
+ skb->_skb_refdst = orefdst;
+ return res;
+ }
+
+ inet_frag_lru_move(&fq->q);
+ return -1;
+err:
+ kfree_skb(skb);
+ return -1;
+}
+
+/* Check if this packet is complete.
+ * Returns -1 on failure for any reason, and 1 once the
+ * frame has been successfully reassembled.
+ *
+ * It is called with the fq locked; the caller must check that the
+ * queue is eligible for reassembly, i.e. it is not COMPLETE and
+ * the first and last frames have arrived and all the bits are here.
+ */
+static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
+ struct net_device *dev)
+{
+ struct sk_buff *fp, *head = fq->q.fragments;
+ int sum_truesize;
+
+ inet_frag_kill(&fq->q, &lowpan_frags);
+
+ /* Make the one we just received the head. */
+ if (prev) {
+ head = prev->next;
+ fp = skb_clone(head, GFP_ATOMIC);
+
+ if (!fp)
+ goto out_oom;
+
+ fp->next = head->next;
+ if (!fp->next)
+ fq->q.fragments_tail = fp;
+ prev->next = fp;
+
+ skb_morph(head, fq->q.fragments);
+ head->next = fq->q.fragments->next;
+
+ consume_skb(fq->q.fragments);
+ fq->q.fragments = head;
+ }
+
+ /* Head of list must not be cloned. */
+ if (skb_unclone(head, GFP_ATOMIC))
+ goto out_oom;
+
+ /* If the first fragment is fragmented itself, we split
+ * it to two chunks: the first with data and paged part
+ * and the second, holding only fragments.
+ */
+ if (skb_has_frag_list(head)) {
+ struct sk_buff *clone;
+ int i, plen = 0;
+
+ clone = alloc_skb(0, GFP_ATOMIC);
+ if (!clone)
+ goto out_oom;
+ clone->next = head->next;
+ head->next = clone;
+ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+ skb_frag_list_init(head);
+ for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
+ plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
+ clone->len = head->data_len - plen;
+ clone->data_len = clone->len;
+ head->data_len -= clone->len;
+ head->len -= clone->len;
+ add_frag_mem_limit(&fq->q, clone->truesize);
+ }
+
+ WARN_ON(head == NULL);
+
+ sum_truesize = head->truesize;
+ for (fp = head->next; fp;) {
+ bool headstolen;
+ int delta;
+ struct sk_buff *next = fp->next;
+
+ sum_truesize += fp->truesize;
+ if (skb_try_coalesce(head, fp, &headstolen, &delta)) {
+ kfree_skb_partial(fp, headstolen);
+ } else {
+ if (!skb_shinfo(head)->frag_list)
+ skb_shinfo(head)->frag_list = fp;
+ head->data_len += fp->len;
+ head->len += fp->len;
+ head->truesize += fp->truesize;
+ }
+ fp = next;
+ }
+ sub_frag_mem_limit(&fq->q, sum_truesize);
+
+ head->next = NULL;
+ head->dev = dev;
+ head->tstamp = fq->q.stamp;
+
+ fq->q.fragments = NULL;
+ fq->q.fragments_tail = NULL;
+
+ return 1;
+out_oom:
+ net_dbg_ratelimited("lowpan_frag_reasm: no memory for reassembly\n");
+ return -1;
+}
+
+static int lowpan_get_frag_info(struct sk_buff *skb, const u8 frag_type,
+ struct ieee802154_frag_info *frag_info)
+{
+ bool fail;
+ u8 pattern = 0, low = 0;
+
+ fail = lowpan_fetch_skb(skb, &pattern, 1);
+ fail |= lowpan_fetch_skb(skb, &low, 1);
+ frag_info->d_size = (pattern & 7) << 8 | low;
+ fail |= lowpan_fetch_skb(skb, &frag_info->d_tag, 2);
+
+ if (frag_type == LOWPAN_DISPATCH_FRAGN) {
+ fail |= lowpan_fetch_skb(skb, &frag_info->d_offset, 1);
+ } else {
+ skb_reset_network_header(skb);
+ frag_info->d_offset = 0;
+ }
+
+ if (unlikely(fail))
+ return -EIO;
+
+ return 0;
+}
+
+int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
+{
+ struct lowpan_frag_queue *fq;
+ struct net *net = dev_net(skb->dev);
+ struct ieee802154_frag_info *frag_info = &mac_cb(skb)->frag_info;
+ int err;
+
+ err = lowpan_get_frag_info(skb, frag_type, frag_info);
+ if (err < 0)
+ goto err;
+
+ if (frag_info->d_size > net->ieee802154_lowpan.max_dsize)
+ goto err;
+
+ inet_frag_evictor(&net->ieee802154_lowpan.frags, &lowpan_frags, false);
+
+ fq = fq_find(net, frag_info, &mac_cb(skb)->sa, &mac_cb(skb)->da);
+ if (fq != NULL) {
+ int ret;
+ spin_lock(&fq->q.lock);
+ ret = lowpan_frag_queue(fq, skb, frag_type);
+ spin_unlock(&fq->q.lock);
+
+ inet_frag_put(&fq->q, &lowpan_frags);
+ return ret;
+ }
+
+err:
+ kfree_skb(skb);
+ return -1;
+}
+EXPORT_SYMBOL(lowpan_frag_rcv);
+
+#ifdef CONFIG_SYSCTL
+static struct ctl_table lowpan_frags_ns_ctl_table[] = {
+ {
+ .procname = "6lowpanfrag_high_thresh",
+ .data = &init_net.ieee802154_lowpan.frags.high_thresh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "6lowpanfrag_low_thresh",
+ .data = &init_net.ieee802154_lowpan.frags.low_thresh,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ {
+ .procname = "6lowpanfrag_time",
+ .data = &init_net.ieee802154_lowpan.frags.timeout,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ {
+ .procname = "6lowpanfrag_max_datagram_size",
+ .data = &init_net.ieee802154_lowpan.max_dsize,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec
+ },
+ { }
+};
+
+static struct ctl_table lowpan_frags_ctl_table[] = {
+ {
+ .procname = "6lowpanfrag_secret_interval",
+ .data = &lowpan_frags.secret_interval,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_jiffies,
+ },
+ { }
+};
+
+static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
+{
+ struct ctl_table *table;
+ struct ctl_table_header *hdr;
+
+ table = lowpan_frags_ns_ctl_table;
+ if (!net_eq(net, &init_net)) {
+ table = kmemdup(table, sizeof(lowpan_frags_ns_ctl_table),
+ GFP_KERNEL);
+ if (table == NULL)
+ goto err_alloc;
+
+ table[0].data = &net->ieee802154_lowpan.frags.high_thresh;
+ table[1].data = &net->ieee802154_lowpan.frags.low_thresh;
+ table[2].data = &net->ieee802154_lowpan.frags.timeout;
+		table[3].data = &net->ieee802154_lowpan.max_dsize;
+
+ /* Don't export sysctls to unprivileged users */
+ if (net->user_ns != &init_user_ns)
+ table[0].procname = NULL;
+ }
+
+ hdr = register_net_sysctl(net, "net/ieee802154/6lowpan", table);
+ if (hdr == NULL)
+ goto err_reg;
+
+ net->ieee802154_lowpan.sysctl.frags_hdr = hdr;
+ return 0;
+
+err_reg:
+ if (!net_eq(net, &init_net))
+ kfree(table);
+err_alloc:
+ return -ENOMEM;
+}
+
+static void __net_exit lowpan_frags_ns_sysctl_unregister(struct net *net)
+{
+ struct ctl_table *table;
+
+ table = net->ieee802154_lowpan.sysctl.frags_hdr->ctl_table_arg;
+ unregister_net_sysctl_table(net->ieee802154_lowpan.sysctl.frags_hdr);
+ if (!net_eq(net, &init_net))
+ kfree(table);
+}
+
+static struct ctl_table_header *lowpan_ctl_header;
+
+static int lowpan_frags_sysctl_register(void)
+{
+ lowpan_ctl_header = register_net_sysctl(&init_net,
+ "net/ieee802154/6lowpan",
+ lowpan_frags_ctl_table);
+ return lowpan_ctl_header == NULL ? -ENOMEM : 0;
+}
+
+static void lowpan_frags_sysctl_unregister(void)
+{
+ unregister_net_sysctl_table(lowpan_ctl_header);
+}
+#else
+static inline int lowpan_frags_ns_sysctl_register(struct net *net)
+{
+ return 0;
+}
+
+static inline void lowpan_frags_ns_sysctl_unregister(struct net *net)
+{
+}
+
+static inline int lowpan_frags_sysctl_register(void)
+{
+ return 0;
+}
+
+static inline void lowpan_frags_sysctl_unregister(void)
+{
+}
+#endif
+
+static int __net_init lowpan_frags_init_net(struct net *net)
+{
+ net->ieee802154_lowpan.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
+ net->ieee802154_lowpan.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
+ net->ieee802154_lowpan.frags.timeout = IPV6_FRAG_TIMEOUT;
+ net->ieee802154_lowpan.max_dsize = 0xFFFF;
+
+ inet_frags_init_net(&net->ieee802154_lowpan.frags);
+
+ return lowpan_frags_ns_sysctl_register(net);
+}
+
+static void __net_exit lowpan_frags_exit_net(struct net *net)
+{
+ lowpan_frags_ns_sysctl_unregister(net);
+ inet_frags_exit_net(&net->ieee802154_lowpan.frags, &lowpan_frags);
+}
+
+static struct pernet_operations lowpan_frags_ops = {
+ .init = lowpan_frags_init_net,
+ .exit = lowpan_frags_exit_net,
+};
+
+int __init lowpan_net_frag_init(void)
+{
+ int ret;
+
+ ret = lowpan_frags_sysctl_register();
+ if (ret)
+ goto out;
+
+ ret = register_pernet_subsys(&lowpan_frags_ops);
+ if (ret)
+ goto err_pernet;
+
+ lowpan_frags.hashfn = lowpan_hashfn;
+ lowpan_frags.constructor = lowpan_frag_init;
+ lowpan_frags.destructor = NULL;
+ lowpan_frags.skb_free = NULL;
+ lowpan_frags.qsize = sizeof(struct frag_queue);
+ lowpan_frags.match = lowpan_frag_match;
+ lowpan_frags.frag_expire = lowpan_frag_expire;
+ lowpan_frags.secret_interval = 10 * 60 * HZ;
+	inet_frags_init(&lowpan_frags);
+
+	return ret;
+err_pernet:
+ lowpan_frags_sysctl_unregister();
+out:
+ return ret;
+}
+
+void lowpan_net_frag_exit(void)
+{
+ inet_frags_fini(&lowpan_frags);
+ lowpan_frags_sysctl_unregister();
+ unregister_pernet_subsys(&lowpan_frags_ops);
+}
diff --git a/net/ieee802154/reassembly.h b/net/ieee802154/reassembly.h
new file mode 100644
index 0000000..055518b
--- /dev/null
+++ b/net/ieee802154/reassembly.h
@@ -0,0 +1,66 @@
+#ifndef __IEEE802154_6LOWPAN_REASSEMBLY_H__
+#define __IEEE802154_6LOWPAN_REASSEMBLY_H__
+
+#include <net/inet_frag.h>
+
+struct lowpan_create_arg {
+ __be16 tag;
+ u16 d_size;
+ const struct ieee802154_addr *src;
+ const struct ieee802154_addr *dst;
+};
+
+/* Equivalent of ipv4 struct ip
+ */
+struct lowpan_frag_queue {
+ struct inet_frag_queue q;
+
+ __be16 tag;
+ u16 d_size;
+ struct ieee802154_addr saddr;
+ struct ieee802154_addr daddr;
+};
+
+static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
+{
+ switch (a->addr_type) {
+ case IEEE802154_ADDR_LONG:
+ return (__force u32)((((u32 *)a->hwaddr))[0] ^
+ ((u32 *)(a->hwaddr))[1]);
+ case IEEE802154_ADDR_SHORT:
+ return (__force u32)(a->short_addr);
+ default:
+ return 0;
+ }
+}
+
+static inline bool ieee802154_addr_addr_equal(const struct ieee802154_addr *a1,
+ const struct ieee802154_addr *a2)
+{
+ if (a1->pan_id != a2->pan_id)
+ return false;
+
+ if (a1->addr_type != a2->addr_type)
+ return false;
+
+ switch (a1->addr_type) {
+ case IEEE802154_ADDR_LONG:
+ if (memcmp(a1->hwaddr, a2->hwaddr, IEEE802154_ADDR_LEN))
+ return false;
+ break;
+ case IEEE802154_ADDR_SHORT:
+ if (a1->short_addr != a2->short_addr)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
+void lowpan_net_frag_exit(void);
+int lowpan_net_frag_init(void);
+
+#endif /* __IEEE802154_6LOWPAN_REASSEMBLY_H__ */
diff --git a/net/ieee802154/wpan-class.c b/net/ieee802154/wpan-class.c
index 4dd3761..edd0962 100644
--- a/net/ieee802154/wpan-class.c
+++ b/net/ieee802154/wpan-class.c
@@ -44,9 +44,7 @@ static DEVICE_ATTR_RO(name);
MASTER_SHOW(current_channel, "%d");
MASTER_SHOW(current_page, "%d");
-MASTER_SHOW_COMPLEX(transmit_power, "%d +- %d dB",
- ((signed char) (phy->transmit_power << 2)) >> 2,
- (phy->transmit_power >> 6) ? (phy->transmit_power >> 6) * 3 : 1);
+MASTER_SHOW(transmit_power, "%d +- 1 dB");
MASTER_SHOW(cca_mode, "%d");
static ssize_t channels_supported_show(struct device *dev,
@@ -171,6 +169,12 @@ struct wpan_phy *wpan_phy_alloc(size_t priv_size)
phy->current_channel = -1; /* not initialised */
phy->current_page = 0; /* for compatibility */
+ /* defaults per 802.15.4-2011 */
+ phy->min_be = 3;
+ phy->max_be = 5;
+ phy->csma_retries = 4;
+ phy->frame_retries = -1; /* for compatibility, actual default is 3 */
+
return phy;
out:
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index f8c49ce..f032688 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -55,4 +55,4 @@ obj-$(CONFIG_MEMCG_KMEM) += tcp_memcontrol.o
obj-$(CONFIG_NETLABEL) += cipso_ipv4.o
obj-$(CONFIG_XFRM) += xfrm4_policy.o xfrm4_state.o xfrm4_input.o \
- xfrm4_output.o
+ xfrm4_output.o xfrm4_protocol.o
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 7179026..a2afa89 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -155,6 +155,10 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
struct iphdr *iph, *top_iph;
struct ip_auth_hdr *ah;
struct ah_data *ahp;
+ int seqhi_len = 0;
+ __be32 *seqhi;
+ int sglists = 0;
+ struct scatterlist *seqhisg;
ahp = x->data;
ahash = ahp->ahash;
@@ -167,14 +171,19 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
ah = ip_auth_hdr(skb);
ihl = ip_hdrlen(skb);
+ if (x->props.flags & XFRM_STATE_ESN) {
+ sglists = 1;
+ seqhi_len = sizeof(*seqhi);
+ }
err = -ENOMEM;
- iph = ah_alloc_tmp(ahash, nfrags, ihl);
+ iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl + seqhi_len);
if (!iph)
goto out;
-
- icv = ah_tmp_icv(ahash, iph, ihl);
+ seqhi = (__be32 *)((char *)iph + ihl);
+ icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
req = ah_tmp_req(ahash, icv);
sg = ah_req_sg(ahash, req);
+ seqhisg = sg + nfrags;
memset(ah->auth_data, 0, ahp->icv_trunc_len);
@@ -210,10 +219,15 @@ static int ah_output(struct xfrm_state *x, struct sk_buff *skb)
ah->spi = x->id.spi;
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
- sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ sg_init_table(sg, nfrags + sglists);
+ skb_to_sgvec_nomark(skb, sg, 0, skb->len);
- ahash_request_set_crypt(req, sg, icv, skb->len);
+ if (x->props.flags & XFRM_STATE_ESN) {
+ /* Attach seqhi sg right after packet payload */
+ *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+ sg_set_buf(seqhisg, seqhi, seqhi_len);
+ }
+ ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
ahash_request_set_callback(req, 0, ah_output_done, skb);
AH_SKB_CB(skb)->tmp = iph;
@@ -295,6 +309,10 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
struct ip_auth_hdr *ah;
struct ah_data *ahp;
int err = -ENOMEM;
+ int seqhi_len = 0;
+ __be32 *seqhi;
+ int sglists = 0;
+ struct scatterlist *seqhisg;
if (!pskb_may_pull(skb, sizeof(*ah)))
goto out;
@@ -335,14 +353,22 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
iph = ip_hdr(skb);
ihl = ip_hdrlen(skb);
- work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
+ if (x->props.flags & XFRM_STATE_ESN) {
+ sglists = 1;
+ seqhi_len = sizeof(*seqhi);
+ }
+
+ work_iph = ah_alloc_tmp(ahash, nfrags + sglists, ihl +
+ ahp->icv_trunc_len + seqhi_len);
if (!work_iph)
goto out;
- auth_data = ah_tmp_auth(work_iph, ihl);
+ seqhi = (__be32 *)((char *)work_iph + ihl);
+ auth_data = ah_tmp_auth(seqhi, seqhi_len);
icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
req = ah_tmp_req(ahash, icv);
sg = ah_req_sg(ahash, req);
+ seqhisg = sg + nfrags;
memcpy(work_iph, iph, ihl);
memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
@@ -361,10 +387,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
skb_push(skb, ihl);
- sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ sg_init_table(sg, nfrags + sglists);
+ skb_to_sgvec_nomark(skb, sg, 0, skb->len);
- ahash_request_set_crypt(req, sg, icv, skb->len);
+ if (x->props.flags & XFRM_STATE_ESN) {
+ /* Attach seqhi sg right after packet payload */
+ *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
+ sg_set_buf(seqhisg, seqhi, seqhi_len);
+ }
+ ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
ahash_request_set_callback(req, 0, ah_input_done, skb);
AH_SKB_CB(skb)->tmp = work_iph;
@@ -397,7 +428,7 @@ out:
return err;
}
-static void ah4_err(struct sk_buff *skb, u32 info)
+static int ah4_err(struct sk_buff *skb, u32 info)
{
struct net *net = dev_net(skb->dev);
const struct iphdr *iph = (const struct iphdr *)skb->data;
@@ -407,23 +438,25 @@ static void ah4_err(struct sk_buff *skb, u32 info)
switch (icmp_hdr(skb)->type) {
case ICMP_DEST_UNREACH:
if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
- return;
+ return 0;
case ICMP_REDIRECT:
break;
default:
- return;
+ return 0;
}
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
ah->spi, IPPROTO_AH, AF_INET);
if (!x)
- return;
+ return 0;
if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
else
ipv4_redirect(skb, net, 0, 0, IPPROTO_AH, 0);
xfrm_state_put(x);
+
+ return 0;
}
static int ah_init_state(struct xfrm_state *x)
@@ -505,6 +538,10 @@ static void ah_destroy(struct xfrm_state *x)
kfree(ahp);
}
+static int ah4_rcv_cb(struct sk_buff *skb, int err)
+{
+ return 0;
+}
static const struct xfrm_type ah_type =
{
@@ -518,11 +555,12 @@ static const struct xfrm_type ah_type =
.output = ah_output
};
-static const struct net_protocol ah4_protocol = {
+static struct xfrm4_protocol ah4_protocol = {
.handler = xfrm4_rcv,
+ .input_handler = xfrm_input,
+ .cb_handler = ah4_rcv_cb,
.err_handler = ah4_err,
- .no_policy = 1,
- .netns_ok = 1,
+ .priority = 0,
};
static int __init ah4_init(void)
@@ -531,7 +569,7 @@ static int __init ah4_init(void)
pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
- if (inet_add_protocol(&ah4_protocol, IPPROTO_AH) < 0) {
+ if (xfrm4_protocol_register(&ah4_protocol, IPPROTO_AH) < 0) {
pr_info("%s: can't add protocol\n", __func__);
xfrm_unregister_type(&ah_type, AF_INET);
return -EAGAIN;
@@ -541,7 +579,7 @@ static int __init ah4_init(void)
static void __exit ah4_fini(void)
{
- if (inet_del_protocol(&ah4_protocol, IPPROTO_AH) < 0)
+ if (xfrm4_protocol_deregister(&ah4_protocol, IPPROTO_AH) < 0)
pr_info("%s: can't remove protocol\n", __func__);
if (xfrm_unregister_type(&ah_type, AF_INET) < 0)
pr_info("%s: can't remove xfrm type\n", __func__);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 7785b28..360b565 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -473,7 +473,7 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
net_adj) & ~(blksize - 1)) + net_adj - 2;
}
-static void esp4_err(struct sk_buff *skb, u32 info)
+static int esp4_err(struct sk_buff *skb, u32 info)
{
struct net *net = dev_net(skb->dev);
const struct iphdr *iph = (const struct iphdr *)skb->data;
@@ -483,23 +483,25 @@ static void esp4_err(struct sk_buff *skb, u32 info)
switch (icmp_hdr(skb)->type) {
case ICMP_DEST_UNREACH:
if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
- return;
+ return 0;
case ICMP_REDIRECT:
break;
default:
- return;
+ return 0;
}
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
esph->spi, IPPROTO_ESP, AF_INET);
if (!x)
- return;
+ return 0;
if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
else
ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
xfrm_state_put(x);
+
+ return 0;
}
static void esp_destroy(struct xfrm_state *x)
@@ -672,6 +674,11 @@ error:
return err;
}
+static int esp4_rcv_cb(struct sk_buff *skb, int err)
+{
+ return 0;
+}
+
static const struct xfrm_type esp_type =
{
.description = "ESP4",
@@ -685,11 +692,12 @@ static const struct xfrm_type esp_type =
.output = esp_output
};
-static const struct net_protocol esp4_protocol = {
+static struct xfrm4_protocol esp4_protocol = {
.handler = xfrm4_rcv,
+ .input_handler = xfrm_input,
+ .cb_handler = esp4_rcv_cb,
.err_handler = esp4_err,
- .no_policy = 1,
- .netns_ok = 1,
+ .priority = 0,
};
static int __init esp4_init(void)
@@ -698,7 +706,7 @@ static int __init esp4_init(void)
pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
- if (inet_add_protocol(&esp4_protocol, IPPROTO_ESP) < 0) {
+ if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
pr_info("%s: can't add protocol\n", __func__);
xfrm_unregister_type(&esp_type, AF_INET);
return -EAGAIN;
@@ -708,7 +716,7 @@ static int __init esp4_init(void)
static void __exit esp4_fini(void)
{
- if (inet_del_protocol(&esp4_protocol, IPPROTO_ESP) < 0)
+ if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
pr_info("%s: can't remove protocol\n", __func__);
if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
pr_info("%s: can't remove xfrm type\n", __func__);
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index f3869c1..be8abe7 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -127,6 +127,10 @@ int ip_forward(struct sk_buff *skb)
struct rtable *rt; /* Route we use */
struct ip_options *opt = &(IPCB(skb)->opt);
+ /* that should never happen */
+ if (skb->pkt_type != PACKET_HOST)
+ goto drop;
+
if (skb_warn_if_lro(skb))
goto drop;
@@ -136,9 +140,6 @@ int ip_forward(struct sk_buff *skb)
if (IPCB(skb)->opt.router_alert && ip_call_ra_chain(skb))
return NET_RX_SUCCESS;
- if (skb->pkt_type != PACKET_HOST)
- goto drop;
-
skb_forward_csum(skb);
/*
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 73c6b63..1a0755f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -446,7 +446,6 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
__be16 not_last_frag;
struct rtable *rt = skb_rtable(skb);
int err = 0;
- bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;
dev = rt->dst.dev;
@@ -456,7 +455,7 @@ int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
iph = ip_hdr(skb);
- mtu = ip_dst_mtu_maybe_forward(&rt->dst, forwarding);
+ mtu = ip_skb_dst_mtu(skb);
if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) ||
(IPCB(skb)->frag_max_size &&
IPCB(skb)->frag_max_size > mtu))) {
@@ -822,8 +821,7 @@ static int __ip_append_data(struct sock *sk,
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
- maxnonfragsize = (inet->pmtudisc >= IP_PMTUDISC_DO) ?
- mtu : 0xFFFF;
+ maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
if (cork->length + length > maxnonfragsize - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1146,8 +1144,7 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
- maxnonfragsize = (inet->pmtudisc >= IP_PMTUDISC_DO) ?
- mtu : 0xFFFF;
+ maxnonfragsize = ip_sk_local_df(sk) ? 0xFFFF : mtu;
if (cork->length + size > maxnonfragsize - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
@@ -1308,8 +1305,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
* to fragment the frame generated here. No matter, what transforms
* how transforms change size of the packet, it will come out.
*/
- if (inet->pmtudisc < IP_PMTUDISC_DO)
- skb->local_df = 1;
+ skb->local_df = ip_sk_local_df(sk);
/* DF bit is set when we want to see DF on outgoing frames.
* If local_df is set too, we still allow to fragment this frame
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 580dd96..64741b9 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -186,7 +186,8 @@ void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
}
EXPORT_SYMBOL(ip_cmsg_recv);
-int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
+int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
+ bool allow_ipv6)
{
int err, val;
struct cmsghdr *cmsg;
@@ -194,6 +195,22 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
if (!CMSG_OK(msg, cmsg))
return -EINVAL;
+#if defined(CONFIG_IPV6)
+ if (allow_ipv6 &&
+ cmsg->cmsg_level == SOL_IPV6 &&
+ cmsg->cmsg_type == IPV6_PKTINFO) {
+ struct in6_pktinfo *src_info;
+
+ if (cmsg->cmsg_len < CMSG_LEN(sizeof(*src_info)))
+ return -EINVAL;
+ src_info = (struct in6_pktinfo *)CMSG_DATA(cmsg);
+ if (!ipv6_addr_v4mapped(&src_info->ipi6_addr))
+ return -EINVAL;
+ ipc->oif = src_info->ipi6_ifindex;
+ ipc->addr = src_info->ipi6_addr.s6_addr32[3];
+ continue;
+ }
+#endif
if (cmsg->cmsg_level != SOL_IP)
continue;
switch (cmsg->cmsg_type) {
@@ -626,7 +643,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
inet->nodefrag = val ? 1 : 0;
break;
case IP_MTU_DISCOVER:
- if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_INTERFACE)
+ if (val < IP_PMTUDISC_DONT || val > IP_PMTUDISC_OMIT)
goto e_inval;
inet->pmtudisc = val;
break;
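With the branch above, a caller that opts in via allow_ipv6 (the flag is passed as false for ping and raw later in this patch) accepts an IPV6_PKTINFO control message whose address is IPv4-mapped and folds it into ipc->oif / ipc->addr. A userspace-side sketch of building such a control message; illustrative only, and it assumes glibc exposing struct in6_pktinfo under _GNU_SOURCE:

#define _GNU_SOURCE
#include <string.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* Illustrative only: fill a control buffer with an IPV6_PKTINFO cmsg
 * whose address is the IPv4-mapped form ::ffff:a.b.c.d, which the
 * ipv6_addr_v4mapped() check above accepts on the IPv4 path.
 * cbuf must provide at least CMSG_SPACE(sizeof(struct in6_pktinfo)).
 */
static void set_v4mapped_pktinfo(struct msghdr *msg, void *cbuf,
				 unsigned int ifindex, struct in_addr v4src)
{
	struct in6_pktinfo pi;
	struct cmsghdr *cm;

	memset(&pi, 0, sizeof(pi));
	pi.ipi6_ifindex = ifindex;
	pi.ipi6_addr.s6_addr[10] = 0xff;	/* ::ffff:0:0/96 prefix */
	pi.ipi6_addr.s6_addr[11] = 0xff;
	memcpy(&pi.ipi6_addr.s6_addr[12], &v4src, sizeof(v4src));

	msg->msg_control = cbuf;
	msg->msg_controllen = CMSG_SPACE(sizeof(pi));
	cm = CMSG_FIRSTHDR(msg);
	cm->cmsg_level = SOL_IPV6;
	cm->cmsg_type = IPV6_PKTINFO;
	cm->cmsg_len = CMSG_LEN(sizeof(pi));
	memcpy(CMSG_DATA(cm), &pi, sizeof(pi));
}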
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 78a89e6..66aaf50 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -235,13 +235,17 @@ static struct hlist_head *ip_bucket(struct ip_tunnel_net *itn,
{
unsigned int h;
__be32 remote;
+ __be32 i_key = parms->i_key;
if (parms->iph.daddr && !ipv4_is_multicast(parms->iph.daddr))
remote = parms->iph.daddr;
else
remote = 0;
- h = ip_tunnel_hash(parms->i_key, remote);
+ if (!(parms->i_flags & TUNNEL_KEY) && (parms->i_flags & VTI_ISVTI))
+ i_key = 0;
+
+ h = ip_tunnel_hash(i_key, remote);
return &itn->tunnels[h];
}
@@ -398,7 +402,7 @@ static struct ip_tunnel *ip_tunnel_create(struct net *net,
fbt = netdev_priv(itn->fb_tunnel_dev);
dev = __ip_tunnel_create(net, itn->fb_tunnel_dev->rtnl_link_ops, parms);
if (IS_ERR(dev))
- return NULL;
+ return ERR_CAST(dev);
dev->mtu = ip_tunnel_bind_dev(dev);
@@ -751,9 +755,13 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
t = ip_tunnel_find(itn, p, itn->fb_tunnel_dev->type);
- if (!t && (cmd == SIOCADDTUNNEL))
+ if (!t && (cmd == SIOCADDTUNNEL)) {
t = ip_tunnel_create(net, itn, p);
-
+ if (IS_ERR(t)) {
+ err = PTR_ERR(t);
+ break;
+ }
+ }
if (dev != itn->fb_tunnel_dev && cmd == SIOCCHGTUNNEL) {
if (t != NULL) {
if (t->dev != dev) {
@@ -780,8 +788,9 @@ int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd)
if (t) {
err = 0;
ip_tunnel_update(itn, t, dev, p, true);
- } else
- err = (cmd == SIOCADDTUNNEL ? -ENOBUFS : -ENOENT);
+ } else {
+ err = -ENOENT;
+ }
break;
case SIOCDELTUNNEL:
@@ -996,19 +1005,13 @@ int ip_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
struct iphdr *iph = &tunnel->parms.iph;
- int i, err;
+ int err;
dev->destructor = ip_tunnel_dev_free;
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *ipt_stats;
- ipt_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&ipt_stats->syncp);
- }
-
tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
if (!tunnel->dst_cache) {
free_percpu(dev->tstats);
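The tstats hunk replaces the open-coded percpu allocation plus u64_stats_init() loop with netdev_alloc_pcpu_stats(). For reference, a sketch of the sequence the removed lines performed, on the assumption that the helper bundles exactly these two steps:

/* Sketch of what the removed lines did by hand; not part of the patch. */
static struct pcpu_sw_netstats __percpu *tunnel_alloc_stats(void)
{
	struct pcpu_sw_netstats __percpu *stats;
	int i;

	stats = alloc_percpu(struct pcpu_sw_netstats);
	if (!stats)
		return NULL;

	for_each_possible_cpu(i) {
		struct pcpu_sw_netstats *s = per_cpu_ptr(stats, i);

		u64_stats_init(&s->syncp);	/* seed the stats seqcount */
	}
	return stats;
}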
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 48eafae..687ddef 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -34,6 +34,7 @@
#include <linux/init.h>
#include <linux/netfilter_ipv4.h>
#include <linux/if_ether.h>
+#include <linux/icmpv6.h>
#include <net/sock.h>
#include <net/ip.h>
@@ -49,8 +50,8 @@ static struct rtnl_link_ops vti_link_ops __read_mostly;
static int vti_net_id __read_mostly;
static int vti_tunnel_init(struct net_device *dev);
-/* We dont digest the packet therefore let the packet pass */
-static int vti_rcv(struct sk_buff *skb)
+static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type)
{
struct ip_tunnel *tunnel;
const struct iphdr *iph = ip_hdr(skb);
@@ -60,79 +61,120 @@ static int vti_rcv(struct sk_buff *skb)
tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
iph->saddr, iph->daddr, 0);
if (tunnel != NULL) {
- struct pcpu_sw_netstats *tstats;
- u32 oldmark = skb->mark;
- int ret;
-
-
- /* temporarily mark the skb with the tunnel o_key, to
- * only match policies with this mark.
- */
- skb->mark = be32_to_cpu(tunnel->parms.o_key);
- ret = xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb);
- skb->mark = oldmark;
- if (!ret)
- return -1;
-
- tstats = this_cpu_ptr(tunnel->dev->tstats);
- u64_stats_update_begin(&tstats->syncp);
- tstats->rx_packets++;
- tstats->rx_bytes += skb->len;
- u64_stats_update_end(&tstats->syncp);
-
- secpath_reset(skb);
- skb->dev = tunnel->dev;
+ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+ goto drop;
+
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
+ skb->mark = be32_to_cpu(tunnel->parms.i_key);
+
+ return xfrm_input(skb, nexthdr, spi, encap_type);
+ }
+
+ return -EINVAL;
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
+static int vti_rcv(struct sk_buff *skb)
+{
+ XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+
+ return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
+}
+
+static int vti_rcv_cb(struct sk_buff *skb, int err)
+{
+ unsigned short family;
+ struct net_device *dev;
+ struct pcpu_sw_netstats *tstats;
+ struct xfrm_state *x;
+ struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
+
+ if (!tunnel)
return 1;
+
+ dev = tunnel->dev;
+
+ if (err) {
+ dev->stats.rx_errors++;
+ dev->stats.rx_dropped++;
+
+ return 0;
}
- return -1;
+ x = xfrm_input_state(skb);
+ family = x->inner_mode->afinfo->family;
+
+ if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+ return -EPERM;
+
+ skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
+ skb->dev = dev;
+
+ tstats = this_cpu_ptr(dev->tstats);
+
+ u64_stats_update_begin(&tstats->syncp);
+ tstats->rx_packets++;
+ tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
+
+ return 0;
}
-/* This function assumes it is being called from dev_queue_xmit()
- * and that skb is filled properly by that function.
- */
+static bool vti_state_check(const struct xfrm_state *x, __be32 dst, __be32 src)
+{
+ xfrm_address_t *daddr = (xfrm_address_t *)&dst;
+ xfrm_address_t *saddr = (xfrm_address_t *)&src;
-static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+ /* if there is no transform then this tunnel is not functional.
+ * Or if the xfrm is not mode tunnel.
+ */
+ if (!x || x->props.mode != XFRM_MODE_TUNNEL ||
+ x->props.family != AF_INET)
+ return false;
+
+ if (!dst)
+ return xfrm_addr_equal(saddr, &x->props.saddr, AF_INET);
+
+ if (!xfrm_state_addr_check(x, daddr, saddr, AF_INET))
+ return false;
+
+ return true;
+}
+
+static netdev_tx_t vti_xmit(struct sk_buff *skb, struct net_device *dev,
+ struct flowi *fl)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- struct iphdr *tiph = &tunnel->parms.iph;
- u8 tos;
- struct rtable *rt; /* Route to the other host */
+ struct ip_tunnel_parm *parms = &tunnel->parms;
+ struct dst_entry *dst = skb_dst(skb);
struct net_device *tdev; /* Device to other host */
- struct iphdr *old_iph = ip_hdr(skb);
- __be32 dst = tiph->daddr;
- struct flowi4 fl4;
int err;
- if (skb->protocol != htons(ETH_P_IP))
- goto tx_error;
-
- tos = old_iph->tos;
+ if (!dst) {
+ dev->stats.tx_carrier_errors++;
+ goto tx_error_icmp;
+ }
- memset(&fl4, 0, sizeof(fl4));
- flowi4_init_output(&fl4, tunnel->parms.link,
- be32_to_cpu(tunnel->parms.o_key), RT_TOS(tos),
- RT_SCOPE_UNIVERSE,
- IPPROTO_IPIP, 0,
- dst, tiph->saddr, 0, 0);
- rt = ip_route_output_key(dev_net(dev), &fl4);
- if (IS_ERR(rt)) {
+ dst_hold(dst);
+ dst = xfrm_lookup(tunnel->net, dst, fl, NULL, 0);
+ if (IS_ERR(dst)) {
dev->stats.tx_carrier_errors++;
goto tx_error_icmp;
}
- /* if there is no transform then this tunnel is not functional.
- * Or if the xfrm is not mode tunnel.
- */
- if (!rt->dst.xfrm ||
- rt->dst.xfrm->props.mode != XFRM_MODE_TUNNEL) {
+
+ if (!vti_state_check(dst->xfrm, parms->iph.daddr, parms->iph.saddr)) {
dev->stats.tx_carrier_errors++;
- ip_rt_put(rt);
+ dst_release(dst);
goto tx_error_icmp;
}
- tdev = rt->dst.dev;
+
+ tdev = dst->dev;
if (tdev == dev) {
- ip_rt_put(rt);
+ dst_release(dst);
dev->stats.collisions++;
goto tx_error;
}
@@ -146,10 +188,8 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
tunnel->err_count = 0;
}
- memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
- skb_dst_drop(skb);
- skb_dst_set(skb, &rt->dst);
- nf_reset(skb);
+ skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(dev)));
+ skb_dst_set(skb, dst);
skb->dev = skb_dst(skb)->dev;
err = dst_output(skb);
@@ -166,6 +206,95 @@ tx_error:
return NETDEV_TX_OK;
}
+/* This function assumes it is being called from dev_queue_xmit()
+ * and that skb is filled properly by that function.
+ */
+static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ struct flowi fl;
+
+ memset(&fl, 0, sizeof(fl));
+
+ skb->mark = be32_to_cpu(tunnel->parms.o_key);
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ xfrm_decode_session(skb, &fl, AF_INET);
+ memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+ break;
+ case htons(ETH_P_IPV6):
+ xfrm_decode_session(skb, &fl, AF_INET6);
+ memset(IP6CB(skb), 0, sizeof(*IP6CB(skb)));
+ break;
+ default:
+ dev->stats.tx_errors++;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ return vti_xmit(skb, dev, &fl);
+}
+
+static int vti4_err(struct sk_buff *skb, u32 info)
+{
+ __be32 spi;
+ struct xfrm_state *x;
+ struct ip_tunnel *tunnel;
+ struct ip_esp_hdr *esph;
+ struct ip_auth_hdr *ah;
+ struct ip_comp_hdr *ipch;
+ struct net *net = dev_net(skb->dev);
+ const struct iphdr *iph = (const struct iphdr *)skb->data;
+ int protocol = iph->protocol;
+ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ iph->daddr, iph->saddr, 0);
+ if (!tunnel)
+ return -1;
+
+ switch (protocol) {
+ case IPPROTO_ESP:
+ esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
+ spi = esph->spi;
+ break;
+ case IPPROTO_AH:
+ ah = (struct ip_auth_hdr *)(skb->data+(iph->ihl<<2));
+ spi = ah->spi;
+ break;
+ case IPPROTO_COMP:
+ ipch = (struct ip_comp_hdr *)(skb->data+(iph->ihl<<2));
+ spi = htonl(ntohs(ipch->cpi));
+ break;
+ default:
+ return 0;
+ }
+
+ switch (icmp_hdr(skb)->type) {
+ case ICMP_DEST_UNREACH:
+ if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
+ return 0;
+ case ICMP_REDIRECT:
+ break;
+ default:
+ return 0;
+ }
+
+ x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
+ spi, protocol, AF_INET);
+ if (!x)
+ return 0;
+
+ if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
+ ipv4_update_pmtu(skb, net, info, 0, 0, protocol, 0);
+ else
+ ipv4_redirect(skb, net, 0, 0, protocol, 0);
+ xfrm_state_put(x);
+
+ return 0;
+}
+
static int
vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
@@ -181,12 +310,13 @@ vti_tunnel_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
return -EINVAL;
}
+ p.i_flags |= VTI_ISVTI;
err = ip_tunnel_ioctl(dev, &p, cmd);
if (err)
return err;
if (cmd != SIOCDELTUNNEL) {
- p.i_flags |= GRE_KEY | VTI_ISVTI;
+ p.i_flags |= GRE_KEY;
p.o_flags |= GRE_KEY;
}
@@ -224,7 +354,6 @@ static int vti_tunnel_init(struct net_device *dev)
dev->flags = IFF_NOARP;
dev->iflink = 0;
dev->addr_len = 4;
- dev->features |= NETIF_F_NETNS_LOCAL;
dev->features |= NETIF_F_LLTX;
dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@ -241,9 +370,28 @@ static void __net_init vti_fb_tunnel_init(struct net_device *dev)
iph->ihl = 5;
}
-static struct xfrm_tunnel_notifier vti_handler __read_mostly = {
+static struct xfrm4_protocol vti_esp4_protocol __read_mostly = {
.handler = vti_rcv,
- .priority = 1,
+ .input_handler = vti_input,
+ .cb_handler = vti_rcv_cb,
+ .err_handler = vti4_err,
+ .priority = 100,
+};
+
+static struct xfrm4_protocol vti_ah4_protocol __read_mostly = {
+ .handler = vti_rcv,
+ .input_handler = vti_input,
+ .cb_handler = vti_rcv_cb,
+ .err_handler = vti4_err,
+ .priority = 100,
+};
+
+static struct xfrm4_protocol vti_ipcomp4_protocol __read_mostly = {
+ .handler = vti_rcv,
+ .input_handler = vti_input,
+ .cb_handler = vti_rcv_cb,
+ .err_handler = vti4_err,
+ .priority = 100,
};
static int __net_init vti_init_net(struct net *net)
@@ -287,6 +435,8 @@ static void vti_netlink_parms(struct nlattr *data[],
if (!data)
return;
+ parms->i_flags = VTI_ISVTI;
+
if (data[IFLA_VTI_LINK])
parms->link = nla_get_u32(data[IFLA_VTI_LINK]);
@@ -382,10 +532,31 @@ static int __init vti_init(void)
err = register_pernet_device(&vti_net_ops);
if (err < 0)
return err;
- err = xfrm4_mode_tunnel_input_register(&vti_handler);
+ err = xfrm4_protocol_register(&vti_esp4_protocol, IPPROTO_ESP);
+ if (err < 0) {
+ unregister_pernet_device(&vti_net_ops);
+ pr_info("vti init: can't register tunnel\n");
+
+ return err;
+ }
+
+ err = xfrm4_protocol_register(&vti_ah4_protocol, IPPROTO_AH);
+ if (err < 0) {
+ xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
+ unregister_pernet_device(&vti_net_ops);
+ pr_info("vti init: can't register tunnel\n");
+
+ return err;
+ }
+
+ err = xfrm4_protocol_register(&vti_ipcomp4_protocol, IPPROTO_COMP);
if (err < 0) {
+ xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+ xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
unregister_pernet_device(&vti_net_ops);
pr_info("vti init: can't register tunnel\n");
+
+ return err;
}
err = rtnl_link_register(&vti_link_ops);
@@ -395,7 +566,9 @@ static int __init vti_init(void)
return err;
rtnl_link_failed:
- xfrm4_mode_tunnel_input_deregister(&vti_handler);
+ xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+ xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+ xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
unregister_pernet_device(&vti_net_ops);
return err;
}
@@ -403,8 +576,13 @@ rtnl_link_failed:
static void __exit vti_fini(void)
{
rtnl_link_unregister(&vti_link_ops);
- if (xfrm4_mode_tunnel_input_deregister(&vti_handler))
+ if (xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP))
pr_info("vti close: can't deregister tunnel\n");
+ if (xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH))
+ pr_info("vti close: can't deregister tunnel\n");
+ if (xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP))
+ pr_info("vti close: can't deregister tunnel\n");
+
unregister_pernet_device(&vti_net_ops);
}
diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
index 826be4c..c0855d5 100644
--- a/net/ipv4/ipcomp.c
+++ b/net/ipv4/ipcomp.c
@@ -23,7 +23,7 @@
#include <net/protocol.h>
#include <net/sock.h>
-static void ipcomp4_err(struct sk_buff *skb, u32 info)
+static int ipcomp4_err(struct sk_buff *skb, u32 info)
{
struct net *net = dev_net(skb->dev);
__be32 spi;
@@ -34,24 +34,26 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
switch (icmp_hdr(skb)->type) {
case ICMP_DEST_UNREACH:
if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
- return;
+ return 0;
case ICMP_REDIRECT:
break;
default:
- return;
+ return 0;
}
spi = htonl(ntohs(ipch->cpi));
x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
spi, IPPROTO_COMP, AF_INET);
if (!x)
- return;
+ return 0;
if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
else
ipv4_redirect(skb, net, 0, 0, IPPROTO_COMP, 0);
xfrm_state_put(x);
+
+ return 0;
}
/* We always hold one tunnel user reference to indicate a tunnel */
@@ -147,6 +149,11 @@ out:
return err;
}
+static int ipcomp4_rcv_cb(struct sk_buff *skb, int err)
+{
+ return 0;
+}
+
static const struct xfrm_type ipcomp_type = {
.description = "IPCOMP4",
.owner = THIS_MODULE,
@@ -157,11 +164,12 @@ static const struct xfrm_type ipcomp_type = {
.output = ipcomp_output
};
-static const struct net_protocol ipcomp4_protocol = {
+static struct xfrm4_protocol ipcomp4_protocol = {
.handler = xfrm4_rcv,
+ .input_handler = xfrm_input,
+ .cb_handler = ipcomp4_rcv_cb,
.err_handler = ipcomp4_err,
- .no_policy = 1,
- .netns_ok = 1,
+ .priority = 0,
};
static int __init ipcomp4_init(void)
@@ -170,7 +178,7 @@ static int __init ipcomp4_init(void)
pr_info("%s: can't add xfrm type\n", __func__);
return -EAGAIN;
}
- if (inet_add_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0) {
+ if (xfrm4_protocol_register(&ipcomp4_protocol, IPPROTO_COMP) < 0) {
pr_info("%s: can't add protocol\n", __func__);
xfrm_unregister_type(&ipcomp_type, AF_INET);
return -EAGAIN;
@@ -180,7 +188,7 @@ static int __init ipcomp4_init(void)
static void __exit ipcomp4_fini(void)
{
- if (inet_del_protocol(&ipcomp4_protocol, IPPROTO_COMP) < 0)
+ if (xfrm4_protocol_deregister(&ipcomp4_protocol, IPPROTO_COMP) < 0)
pr_info("%s: can't remove protocol\n", __func__);
if (xfrm_unregister_type(&ipcomp_type, AF_INET) < 0)
pr_info("%s: can't remove xfrm type\n", __func__);
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 2d11c09..f4b19e5 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -727,7 +727,7 @@ static int ping_v4_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
sock_tx_timestamp(sk, &ipc.tx_flags);
if (msg->msg_controllen) {
- err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+ err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
if (err)
return err;
if (ipc.opt)
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index a6c8a80..ad737fa 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -273,6 +273,7 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPChallengeACK", LINUX_MIB_TCPCHALLENGEACK),
SNMP_MIB_ITEM("TCPSYNChallenge", LINUX_MIB_TCPSYNCHALLENGE),
SNMP_MIB_ITEM("TCPFastOpenActive", LINUX_MIB_TCPFASTOPENACTIVE),
+ SNMP_MIB_ITEM("TCPFastOpenActiveFail", LINUX_MIB_TCPFASTOPENACTIVEFAIL),
SNMP_MIB_ITEM("TCPFastOpenPassive", LINUX_MIB_TCPFASTOPENPASSIVE),
SNMP_MIB_ITEM("TCPFastOpenPassiveFail", LINUX_MIB_TCPFASTOPENPASSIVEFAIL),
SNMP_MIB_ITEM("TCPFastOpenListenOverflow", LINUX_MIB_TCPFASTOPENLISTENOVERFLOW),
@@ -280,6 +281,11 @@ static const struct snmp_mib snmp4_net_list[] = {
SNMP_MIB_ITEM("TCPSpuriousRtxHostQueues", LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES),
SNMP_MIB_ITEM("BusyPollRxPackets", LINUX_MIB_BUSYPOLLRXPACKETS),
SNMP_MIB_ITEM("TCPAutoCorking", LINUX_MIB_TCPAUTOCORKING),
+ SNMP_MIB_ITEM("TCPFromZeroWindowAdv", LINUX_MIB_TCPFROMZEROWINDOWADV),
+ SNMP_MIB_ITEM("TCPToZeroWindowAdv", LINUX_MIB_TCPTOZEROWINDOWADV),
+ SNMP_MIB_ITEM("TCPWantZeroWindowAdv", LINUX_MIB_TCPWANTZEROWINDOWADV),
+ SNMP_MIB_ITEM("TCPSynRetrans", LINUX_MIB_TCPSYNRETRANS),
+ SNMP_MIB_ITEM("TCPOrigDataSent", LINUX_MIB_TCPORIGDATASENT),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index c04518f..a9dbe58 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -524,7 +524,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
ipc.oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
- err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+ err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
if (err)
goto out;
if (ipc.opt)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 4c011ec..11e4384 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -697,7 +697,6 @@ static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
out_unlock:
spin_unlock_bh(&fnhe_lock);
- return;
}
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 97c8f56..4bd6d52 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -387,7 +387,7 @@ void tcp_init_sock(struct sock *sk)
INIT_LIST_HEAD(&tp->tsq_node);
icsk->icsk_rto = TCP_TIMEOUT_INIT;
- tp->mdev = TCP_TIMEOUT_INIT;
+ tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
/* So many TCP implementations out there (incorrectly) count the
* initial SYN frame in their delayed-ACK and congestion control
@@ -2341,7 +2341,7 @@ int tcp_disconnect(struct sock *sk, int flags)
sk->sk_shutdown = 0;
sock_reset_flag(sk, SOCK_DONE);
- tp->srtt = 0;
+ tp->srtt_us = 0;
if ((tp->write_seq += tp->max_window + 2) == 0)
tp->write_seq = 1;
icsk->icsk_backoff = 0;
@@ -2785,8 +2785,8 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
- info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
- info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
+ info->tcpi_rtt = tp->srtt_us >> 3;
+ info->tcpi_rttvar = tp->mdev_us >> 2;
info->tcpi_snd_ssthresh = tp->snd_ssthresh;
info->tcpi_snd_cwnd = tp->snd_cwnd;
info->tcpi_advmss = tp->advmss;
@@ -2796,6 +2796,11 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
info->tcpi_rcv_space = tp->rcvq_space.space;
info->tcpi_total_retrans = tp->total_retrans;
+
+ info->tcpi_pacing_rate = sk->sk_pacing_rate != ~0U ?
+ sk->sk_pacing_rate : ~0ULL;
+ info->tcpi_max_pacing_rate = sk->sk_max_pacing_rate != ~0U ?
+ sk->sk_max_pacing_rate : ~0ULL;
}
EXPORT_SYMBOL_GPL(tcp_get_info);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 2388275..2b9464c 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -361,21 +361,12 @@ u32 tcp_reno_ssthresh(struct sock *sk)
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
-/* Lower bound on congestion window with halving. */
-u32 tcp_reno_min_cwnd(const struct sock *sk)
-{
- const struct tcp_sock *tp = tcp_sk(sk);
- return tp->snd_ssthresh/2;
-}
-EXPORT_SYMBOL_GPL(tcp_reno_min_cwnd);
-
struct tcp_congestion_ops tcp_reno = {
.flags = TCP_CONG_NON_RESTRICTED,
.name = "reno",
.owner = THIS_MODULE,
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_reno_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
};
/* Initial congestion control used (until SYN)
@@ -387,6 +378,5 @@ struct tcp_congestion_ops tcp_init_congestion_ops = {
.owner = THIS_MODULE,
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_reno_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
};
EXPORT_SYMBOL_GPL(tcp_init_congestion_ops);
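With .min_cwnd dropped from tcp_congestion_ops throughout this patch, a minimal module only supplies .ssthresh and .cong_avoid. A sketch of such an ops table for a hypothetical module, reusing the reno callbacks referenced in this hunk:

/* Hypothetical module, sketch only. */
static struct tcp_congestion_ops tcp_example __read_mostly = {
	.flags		= TCP_CONG_NON_RESTRICTED,
	.name		= "example",
	.owner		= THIS_MODULE,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	/* no .min_cwnd any more */
};

static int __init tcp_example_register(void)
{
	return tcp_register_congestion_control(&tcp_example);
}
module_init(tcp_example_register);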
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 828e4c3..8bf2245 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -476,10 +476,6 @@ static int __init cubictcp_register(void)
/* divide by bic_scale and by constant Srtt (100ms) */
do_div(cube_factor, bic_scale * 10);
- /* hystart needs ms clock resolution */
- if (hystart && HZ < 1000)
- cubictcp.flags |= TCP_CONG_RTT_STAMP;
-
return tcp_register_congestion_control(&cubictcp);
}
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 8ed9305..8b9e7ba 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -162,7 +162,6 @@ static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
.init = hstcp_init,
.ssthresh = hstcp_ssthresh,
.cong_avoid = hstcp_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
.owner = THIS_MODULE,
.name = "highspeed"
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index 478fe82..a15a799 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -21,7 +21,7 @@ struct hybla {
u32 rho2; /* Rho * Rho, integer part */
u32 rho_3ls; /* Rho parameter, <<3 */
u32 rho2_7ls; /* Rho^2, <<7 */
- u32 minrtt; /* Minimum smoothed round trip time value seen */
+ u32 minrtt_us; /* Minimum smoothed round trip time value seen */
};
/* Hybla reference round trip time (default= 1/40 sec = 25 ms), in ms */
@@ -35,7 +35,9 @@ static inline void hybla_recalc_param (struct sock *sk)
{
struct hybla *ca = inet_csk_ca(sk);
- ca->rho_3ls = max_t(u32, tcp_sk(sk)->srtt / msecs_to_jiffies(rtt0), 8);
+ ca->rho_3ls = max_t(u32,
+ tcp_sk(sk)->srtt_us / (rtt0 * USEC_PER_MSEC),
+ 8U);
ca->rho = ca->rho_3ls >> 3;
ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
ca->rho2 = ca->rho2_7ls >> 7;
@@ -59,7 +61,7 @@ static void hybla_init(struct sock *sk)
hybla_recalc_param(sk);
/* set minimum rtt as this is the 1st ever seen */
- ca->minrtt = tp->srtt;
+ ca->minrtt_us = tp->srtt_us;
tp->snd_cwnd = ca->rho;
}
@@ -94,9 +96,9 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
int is_slowstart = 0;
/* Recalculate rho only if this srtt is the lowest */
- if (tp->srtt < ca->minrtt){
+ if (tp->srtt_us < ca->minrtt_us) {
hybla_recalc_param(sk);
- ca->minrtt = tp->srtt;
+ ca->minrtt_us = tp->srtt_us;
}
if (!tcp_is_cwnd_limited(sk, in_flight))
@@ -166,7 +168,6 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
static struct tcp_congestion_ops tcp_hybla __read_mostly = {
.init = hybla_init,
.ssthresh = tcp_reno_ssthresh,
- .min_cwnd = tcp_reno_min_cwnd,
.cong_avoid = hybla_cong_avoid,
.set_state = hybla_state,
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index e498a62..863d105 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -325,10 +325,8 @@ static void tcp_illinois_info(struct sock *sk, u32 ext,
}
static struct tcp_congestion_ops tcp_illinois __read_mostly = {
- .flags = TCP_CONG_RTT_STAMP,
.init = tcp_illinois_init,
.ssthresh = tcp_illinois_ssthresh,
- .min_cwnd = tcp_reno_min_cwnd,
.cong_avoid = tcp_illinois_cong_avoid,
.set_state = tcp_illinois_state,
.get_info = tcp_illinois_info,
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eeaac39..b99003f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -667,11 +667,11 @@ static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
* To save cycles in the RFC 1323 implementation it was better to break
* it up into three procedures. -- erics
*/
-static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
+static void tcp_rtt_estimator(struct sock *sk, long mrtt_us)
{
struct tcp_sock *tp = tcp_sk(sk);
- long m = mrtt; /* RTT */
- u32 srtt = tp->srtt;
+ long m = mrtt_us; /* RTT */
+ u32 srtt = tp->srtt_us;
/* The following amusing code comes from Jacobson's
* article in SIGCOMM '88. Note that rtt and mdev
@@ -694,7 +694,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
srtt += m; /* rtt = 7/8 rtt + 1/8 new */
if (m < 0) {
m = -m; /* m is now abs(error) */
- m -= (tp->mdev >> 2); /* similar update on mdev */
+ m -= (tp->mdev_us >> 2); /* similar update on mdev */
/* This is similar to one of Eifel findings.
* Eifel blocks mdev updates when rtt decreases.
* This solution is a bit different: we use finer gain
@@ -706,28 +706,29 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
if (m > 0)
m >>= 3;
} else {
- m -= (tp->mdev >> 2); /* similar update on mdev */
+ m -= (tp->mdev_us >> 2); /* similar update on mdev */
}
- tp->mdev += m; /* mdev = 3/4 mdev + 1/4 new */
- if (tp->mdev > tp->mdev_max) {
- tp->mdev_max = tp->mdev;
- if (tp->mdev_max > tp->rttvar)
- tp->rttvar = tp->mdev_max;
+ tp->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */
+ if (tp->mdev_us > tp->mdev_max_us) {
+ tp->mdev_max_us = tp->mdev_us;
+ if (tp->mdev_max_us > tp->rttvar_us)
+ tp->rttvar_us = tp->mdev_max_us;
}
if (after(tp->snd_una, tp->rtt_seq)) {
- if (tp->mdev_max < tp->rttvar)
- tp->rttvar -= (tp->rttvar - tp->mdev_max) >> 2;
+ if (tp->mdev_max_us < tp->rttvar_us)
+ tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2;
tp->rtt_seq = tp->snd_nxt;
- tp->mdev_max = tcp_rto_min(sk);
+ tp->mdev_max_us = tcp_rto_min_us(sk);
}
} else {
/* no previous measure. */
srtt = m << 3; /* take the measured time to be rtt */
- tp->mdev = m << 1; /* make sure rto = 3*rtt */
- tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
+ tp->mdev_us = m << 1; /* make sure rto = 3*rtt */
+ tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk));
+ tp->mdev_max_us = tp->rttvar_us;
tp->rtt_seq = tp->snd_nxt;
}
- tp->srtt = max(1U, srtt);
+ tp->srtt_us = max(1U, srtt);
}
/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
@@ -742,20 +743,12 @@ static void tcp_update_pacing_rate(struct sock *sk)
u64 rate;
/* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */
- rate = (u64)tp->mss_cache * 2 * (HZ << 3);
+ rate = (u64)tp->mss_cache * 2 * (USEC_PER_SEC << 3);
rate *= max(tp->snd_cwnd, tp->packets_out);
- /* Correction for small srtt and scheduling constraints.
- * For small rtt, consider noise is too high, and use
- * the minimal value (srtt = 1 -> 125 us for HZ=1000)
- *
- * We probably need usec resolution in the future.
- * Note: This also takes care of possible srtt=0 case,
- * when tcp_rtt_estimator() was not yet called.
- */
- if (tp->srtt > 8 + 2)
- do_div(rate, tp->srtt);
+ if (likely(tp->srtt_us))
+ do_div(rate, tp->srtt_us);
/* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate
* without any lock. We want to make sure compiler wont store
@@ -1122,10 +1115,10 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
}
struct tcp_sacktag_state {
- int reord;
- int fack_count;
- int flag;
- s32 rtt; /* RTT measured by SACKing never-retransmitted data */
+ int reord;
+ int fack_count;
+ long rtt_us; /* RTT measured by SACKing never-retransmitted data */
+ int flag;
};
/* Check if skb is fully within the SACK block. In presence of GSO skbs,
@@ -1186,7 +1179,8 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
static u8 tcp_sacktag_one(struct sock *sk,
struct tcp_sacktag_state *state, u8 sacked,
u32 start_seq, u32 end_seq,
- int dup_sack, int pcount, u32 xmit_time)
+ int dup_sack, int pcount,
+ const struct skb_mstamp *xmit_time)
{
struct tcp_sock *tp = tcp_sk(sk);
int fack_count = state->fack_count;
@@ -1227,8 +1221,13 @@ static u8 tcp_sacktag_one(struct sock *sk,
if (!after(end_seq, tp->high_seq))
state->flag |= FLAG_ORIG_SACK_ACKED;
/* Pick the earliest sequence sacked for RTT */
- if (state->rtt < 0)
- state->rtt = tcp_time_stamp - xmit_time;
+ if (state->rtt_us < 0) {
+ struct skb_mstamp now;
+
+ skb_mstamp_get(&now);
+ state->rtt_us = skb_mstamp_us_delta(&now,
+ xmit_time);
+ }
}
if (sacked & TCPCB_LOST) {
@@ -1287,7 +1286,7 @@ static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
*/
tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
start_seq, end_seq, dup_sack, pcount,
- TCP_SKB_CB(skb)->when);
+ &skb->skb_mstamp);
if (skb == tp->lost_skb_hint)
tp->lost_cnt_hint += pcount;
@@ -1565,7 +1564,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
TCP_SKB_CB(skb)->end_seq,
dup_sack,
tcp_skb_pcount(skb),
- TCP_SKB_CB(skb)->when);
+ &skb->skb_mstamp);
if (!before(TCP_SKB_CB(skb)->seq,
tcp_highest_sack_seq(tp)))
@@ -1622,7 +1621,7 @@ static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_bl
static int
tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
- u32 prior_snd_una, s32 *sack_rtt)
+ u32 prior_snd_una, long *sack_rtt_us)
{
struct tcp_sock *tp = tcp_sk(sk);
const unsigned char *ptr = (skb_transport_header(ack_skb) +
@@ -1640,7 +1639,7 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
state.flag = 0;
state.reord = tp->packets_out;
- state.rtt = -1;
+ state.rtt_us = -1L;
if (!tp->sacked_out) {
if (WARN_ON(tp->fackets_out))
@@ -1824,7 +1823,7 @@ out:
WARN_ON((int)tp->retrans_out < 0);
WARN_ON((int)tcp_packets_in_flight(tp) < 0);
#endif
- *sack_rtt = state.rtt;
+ *sack_rtt_us = state.rtt_us;
return state.flag;
}
@@ -2035,10 +2034,12 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
* available, or RTO is scheduled to fire first.
*/
if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 ||
- (flag & FLAG_ECE) || !tp->srtt)
+ (flag & FLAG_ECE) || !tp->srtt_us)
return false;
- delay = max_t(unsigned long, (tp->srtt >> 5), msecs_to_jiffies(2));
+ delay = max(usecs_to_jiffies(tp->srtt_us >> 5),
+ msecs_to_jiffies(2));
+
if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
return false;
@@ -2885,7 +2886,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
}
static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
- s32 seq_rtt, s32 sack_rtt)
+ long seq_rtt_us, long sack_rtt_us)
{
const struct tcp_sock *tp = tcp_sk(sk);
@@ -2895,10 +2896,10 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
* is acked (RFC6298).
*/
if (flag & FLAG_RETRANS_DATA_ACKED)
- seq_rtt = -1;
+ seq_rtt_us = -1L;
- if (seq_rtt < 0)
- seq_rtt = sack_rtt;
+ if (seq_rtt_us < 0)
+ seq_rtt_us = sack_rtt_us;
/* RTTM Rule: A TSecr value received in a segment is used to
* update the averaged RTT measurement only if the segment
@@ -2906,14 +2907,14 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
* left edge of the send window.
* See draft-ietf-tcplw-high-performance-00, section 3.3.
*/
- if (seq_rtt < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
+ if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
flag & FLAG_ACKED)
- seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
+ seq_rtt_us = jiffies_to_usecs(tcp_time_stamp - tp->rx_opt.rcv_tsecr);
- if (seq_rtt < 0)
+ if (seq_rtt_us < 0)
return false;
- tcp_rtt_estimator(sk, seq_rtt);
+ tcp_rtt_estimator(sk, seq_rtt_us);
tcp_set_rto(sk);
/* RFC6298: only reset backoff on valid RTT measurement. */
@@ -2925,16 +2926,16 @@ static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag,
static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
{
struct tcp_sock *tp = tcp_sk(sk);
- s32 seq_rtt = -1;
+ long seq_rtt_us = -1L;
if (synack_stamp && !tp->total_retrans)
- seq_rtt = tcp_time_stamp - synack_stamp;
+ seq_rtt_us = jiffies_to_usecs(tcp_time_stamp - synack_stamp);
/* If the ACK acks both the SYNACK and the (Fast Open'd) data packets
* sent in SYN_RECV, SYNACK RTT is the smooth RTT computed in tcp_ack()
*/
- if (!tp->srtt)
- tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
+ if (!tp->srtt_us)
+ tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L);
}
static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
@@ -3023,26 +3024,27 @@ static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
* arrived at the other end.
*/
static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
- u32 prior_snd_una, s32 sack_rtt)
+ u32 prior_snd_una, long sack_rtt_us)
{
- struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
- struct sk_buff *skb;
- u32 now = tcp_time_stamp;
+ struct skb_mstamp first_ackt, last_ackt, now;
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 prior_sacked = tp->sacked_out;
+ u32 reord = tp->packets_out;
bool fully_acked = true;
- int flag = 0;
+ long ca_seq_rtt_us = -1L;
+ long seq_rtt_us = -1L;
+ struct sk_buff *skb;
u32 pkts_acked = 0;
- u32 reord = tp->packets_out;
- u32 prior_sacked = tp->sacked_out;
- s32 seq_rtt = -1;
- s32 ca_seq_rtt = -1;
- ktime_t last_ackt = net_invalid_timestamp();
bool rtt_update;
+ int flag = 0;
+
+ first_ackt.v64 = 0;
while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
- u32 acked_pcount;
u8 sacked = scb->sacked;
+ u32 acked_pcount;
/* Determine how many packets and what bytes were acked, tso and else */
if (after(scb->end_seq, tp->snd_una)) {
@@ -3064,11 +3066,10 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->retrans_out -= acked_pcount;
flag |= FLAG_RETRANS_DATA_ACKED;
} else {
- ca_seq_rtt = now - scb->when;
- last_ackt = skb->tstamp;
- if (seq_rtt < 0) {
- seq_rtt = ca_seq_rtt;
- }
+ last_ackt = skb->skb_mstamp;
+ if (!first_ackt.v64)
+ first_ackt = last_ackt;
+
if (!(sacked & TCPCB_SACKED_ACKED))
reord = min(pkts_acked, reord);
if (!after(scb->end_seq, tp->high_seq))
@@ -3114,7 +3115,13 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
flag |= FLAG_SACK_RENEGING;
- rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt, sack_rtt);
+ skb_mstamp_get(&now);
+ if (first_ackt.v64) {
+ seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
+ ca_seq_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
+ }
+
+ rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us);
if (flag & FLAG_ACKED) {
const struct tcp_congestion_ops *ca_ops
@@ -3142,25 +3149,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
tp->fackets_out -= min(pkts_acked, tp->fackets_out);
- if (ca_ops->pkts_acked) {
- s32 rtt_us = -1;
-
- /* Is the ACK triggering packet unambiguous? */
- if (!(flag & FLAG_RETRANS_DATA_ACKED)) {
- /* High resolution needed and available? */
- if (ca_ops->flags & TCP_CONG_RTT_STAMP &&
- !ktime_equal(last_ackt,
- net_invalid_timestamp()))
- rtt_us = ktime_us_delta(ktime_get_real(),
- last_ackt);
- else if (ca_seq_rtt >= 0)
- rtt_us = jiffies_to_usecs(ca_seq_rtt);
- }
+ if (ca_ops->pkts_acked)
+ ca_ops->pkts_acked(sk, pkts_acked, ca_seq_rtt_us);
- ca_ops->pkts_acked(sk, pkts_acked, rtt_us);
- }
- } else if (skb && rtt_update && sack_rtt >= 0 &&
- sack_rtt > (s32)(now - TCP_SKB_CB(skb)->when)) {
+ } else if (skb && rtt_update && sack_rtt_us >= 0 &&
+ sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
/* Do not re-arm RTO if the sack RTT is measured from data sent
* after when the head was last (re)transmitted. Otherwise the
* timeout may continue to extend in loss recovery.
@@ -3370,12 +3363,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
u32 ack_seq = TCP_SKB_CB(skb)->seq;
u32 ack = TCP_SKB_CB(skb)->ack_seq;
bool is_dupack = false;
- u32 prior_in_flight, prior_cwnd = tp->snd_cwnd, prior_rtt = tp->srtt;
+ u32 prior_in_flight;
u32 prior_fackets;
int prior_packets = tp->packets_out;
const int prior_unsacked = tp->packets_out - tp->sacked_out;
int acked = 0; /* Number of packets newly acked */
- s32 sack_rtt = -1;
+ long sack_rtt_us = -1L;
/* If the ack is older than previous acks
* then we can probably ignore it.
@@ -3433,7 +3426,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (TCP_SKB_CB(skb)->sacked)
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
- &sack_rtt);
+ &sack_rtt_us);
if (TCP_ECN_rcv_ecn_echo(tp, tcp_hdr(skb)))
flag |= FLAG_ECE;
@@ -3452,7 +3445,8 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* See if we can take anything off of the retransmit queue. */
acked = tp->packets_out;
- flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, sack_rtt);
+ flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una,
+ sack_rtt_us);
acked -= tp->packets_out;
/* Advance cwnd if state allows */
@@ -3475,8 +3469,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (icsk->icsk_pending == ICSK_TIME_RETRANS)
tcp_schedule_loss_probe(sk);
- if (tp->srtt != prior_rtt || tp->snd_cwnd != prior_cwnd)
- tcp_update_pacing_rate(sk);
+ tcp_update_pacing_rate(sk);
return 1;
no_queue:
@@ -3505,7 +3498,7 @@ old_ack:
*/
if (TCP_SKB_CB(skb)->sacked) {
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
- &sack_rtt);
+ &sack_rtt_us);
tcp_fastretrans_alert(sk, acked, prior_unsacked,
is_dupack, flag);
}
@@ -5401,9 +5394,12 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
break;
}
tcp_rearm_rto(sk);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVEFAIL);
return true;
}
tp->syn_data_acked = tp->syn_data;
+ if (tp->syn_data_acked)
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
return false;
}
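The pacing-rate hunk drops the HZ-based correction and divides by srtt_us directly, since srtt is now kept in usec and left-shifted by 3 by tcp_rtt_estimator() above. A worked sketch of the arithmetic outside the kernel, with one concrete data point; not kernel code:

#include <stdint.h>
#include <stdio.h>

/* rate = 2 * mss * max(cwnd, packets_out) / srtt, with srtt stored as
 * (srtt_us << 3), mirroring the hunk above.
 */
static uint64_t pacing_rate(uint32_t mss, uint32_t cwnd,
			    uint32_t packets_out, uint32_t srtt_us_shifted)
{
	uint64_t rate = (uint64_t)mss * 2 * (1000000ULL << 3);

	rate *= (cwnd > packets_out) ? cwnd : packets_out;
	if (srtt_us_shifted)		/* srtt == 0: skip the division, as above */
		rate /= srtt_us_shifted;
	return rate;
}

int main(void)
{
	/* mss 1448, cwnd 10, srtt 20 ms  =>  2 * 1448 * 10 / 0.02 s = 1448000 B/s */
	printf("%llu bytes/sec\n",
	       (unsigned long long)pacing_rate(1448, 10, 0, 20000 << 3));
	return 0;
}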
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3cf9765..c4f1d9a 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -435,7 +435,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
break;
icsk->icsk_backoff--;
- inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
+ inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) :
TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
tcp_bound_rto(sk);
@@ -854,8 +854,10 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
{
int res = tcp_v4_send_synack(sk, NULL, req, 0);
- if (!res)
+ if (!res) {
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+ }
return res;
}
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 991d62a..c9aecae 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -315,11 +315,9 @@ static void tcp_lp_pkts_acked(struct sock *sk, u32 num_acked, s32 rtt_us)
}
static struct tcp_congestion_ops tcp_lp __read_mostly = {
- .flags = TCP_CONG_RTT_STAMP,
.init = tcp_lp_init,
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_lp_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
.pkts_acked = tcp_lp_pkts_acked,
.owner = THIS_MODULE,
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index d547075..dcaf72f 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -33,6 +33,11 @@ struct tcp_fastopen_metrics {
struct tcp_fastopen_cookie cookie;
};
+/* TCP_METRIC_MAX includes 2 extra fields for userspace compatibility
+ * Kernel only stores RTT and RTTVAR in usec resolution
+ */
+#define TCP_METRIC_MAX_KERNEL (TCP_METRIC_MAX - 2)
+
struct tcp_metrics_block {
struct tcp_metrics_block __rcu *tcpm_next;
struct inetpeer_addr tcpm_saddr;
@@ -41,7 +46,7 @@ struct tcp_metrics_block {
u32 tcpm_ts;
u32 tcpm_ts_stamp;
u32 tcpm_lock;
- u32 tcpm_vals[TCP_METRIC_MAX + 1];
+ u32 tcpm_vals[TCP_METRIC_MAX_KERNEL + 1];
struct tcp_fastopen_metrics tcpm_fastopen;
struct rcu_head rcu_head;
@@ -59,12 +64,6 @@ static u32 tcp_metric_get(struct tcp_metrics_block *tm,
return tm->tcpm_vals[idx];
}
-static u32 tcp_metric_get_jiffies(struct tcp_metrics_block *tm,
- enum tcp_metric_index idx)
-{
- return msecs_to_jiffies(tm->tcpm_vals[idx]);
-}
-
static void tcp_metric_set(struct tcp_metrics_block *tm,
enum tcp_metric_index idx,
u32 val)
@@ -72,13 +71,6 @@ static void tcp_metric_set(struct tcp_metrics_block *tm,
tm->tcpm_vals[idx] = val;
}
-static void tcp_metric_set_msecs(struct tcp_metrics_block *tm,
- enum tcp_metric_index idx,
- u32 val)
-{
- tm->tcpm_vals[idx] = jiffies_to_msecs(val);
-}
-
static bool addr_same(const struct inetpeer_addr *a,
const struct inetpeer_addr *b)
{
@@ -101,9 +93,11 @@ struct tcpm_hash_bucket {
static DEFINE_SPINLOCK(tcp_metrics_lock);
-static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
+static void tcpm_suck_dst(struct tcp_metrics_block *tm,
+ const struct dst_entry *dst,
bool fastopen_clear)
{
+ u32 msval;
u32 val;
tm->tcpm_stamp = jiffies;
@@ -121,8 +115,11 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, struct dst_entry *dst,
val |= 1 << TCP_METRIC_REORDERING;
tm->tcpm_lock = val;
- tm->tcpm_vals[TCP_METRIC_RTT] = dst_metric_raw(dst, RTAX_RTT);
- tm->tcpm_vals[TCP_METRIC_RTTVAR] = dst_metric_raw(dst, RTAX_RTTVAR);
+ msval = dst_metric_raw(dst, RTAX_RTT);
+ tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC;
+
+ msval = dst_metric_raw(dst, RTAX_RTTVAR);
+ tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC;
tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH);
tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND);
tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING);
@@ -384,7 +381,7 @@ void tcp_update_metrics(struct sock *sk)
dst_confirm(dst);
rcu_read_lock();
- if (icsk->icsk_backoff || !tp->srtt) {
+ if (icsk->icsk_backoff || !tp->srtt_us) {
/* This session failed to estimate rtt. Why?
* Probably, no packets returned in time. Reset our
* results.
@@ -399,8 +396,8 @@ void tcp_update_metrics(struct sock *sk)
if (!tm)
goto out_unlock;
- rtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
- m = rtt - tp->srtt;
+ rtt = tcp_metric_get(tm, TCP_METRIC_RTT);
+ m = rtt - tp->srtt_us;
/* If newly calculated rtt larger than stored one, store new
* one. Otherwise, use EWMA. Remember, rtt overestimation is
@@ -408,10 +405,10 @@ void tcp_update_metrics(struct sock *sk)
*/
if (!tcp_metric_locked(tm, TCP_METRIC_RTT)) {
if (m <= 0)
- rtt = tp->srtt;
+ rtt = tp->srtt_us;
else
rtt -= (m >> 3);
- tcp_metric_set_msecs(tm, TCP_METRIC_RTT, rtt);
+ tcp_metric_set(tm, TCP_METRIC_RTT, rtt);
}
if (!tcp_metric_locked(tm, TCP_METRIC_RTTVAR)) {
@@ -422,16 +419,16 @@ void tcp_update_metrics(struct sock *sk)
/* Scale deviation to rttvar fixed point */
m >>= 1;
- if (m < tp->mdev)
- m = tp->mdev;
+ if (m < tp->mdev_us)
+ m = tp->mdev_us;
- var = tcp_metric_get_jiffies(tm, TCP_METRIC_RTTVAR);
+ var = tcp_metric_get(tm, TCP_METRIC_RTTVAR);
if (m >= var)
var = m;
else
var -= (var - m) >> 2;
- tcp_metric_set_msecs(tm, TCP_METRIC_RTTVAR, var);
+ tcp_metric_set(tm, TCP_METRIC_RTTVAR, var);
}
if (tcp_in_initial_slowstart(tp)) {
@@ -528,7 +525,7 @@ void tcp_init_metrics(struct sock *sk)
tp->reordering = val;
}
- crtt = tcp_metric_get_jiffies(tm, TCP_METRIC_RTT);
+ crtt = tcp_metric_get(tm, TCP_METRIC_RTT);
rcu_read_unlock();
reset:
/* The initial RTT measurement from the SYN/SYN-ACK is not ideal
@@ -551,18 +548,20 @@ reset:
* to low value, and then abruptly stops to do it and starts to delay
* ACKs, wait for troubles.
*/
- if (crtt > tp->srtt) {
+ if (crtt > tp->srtt_us) {
/* Set RTO like tcp_rtt_estimator(), but from cached RTT. */
- crtt >>= 3;
+ crtt /= 8 * USEC_PER_MSEC;
inet_csk(sk)->icsk_rto = crtt + max(2 * crtt, tcp_rto_min(sk));
- } else if (tp->srtt == 0) {
+ } else if (tp->srtt_us == 0) {
/* RFC6298: 5.7 We've failed to get a valid RTT sample from
* 3WHS. This is most likely due to retransmission,
* including spurious one. Reset the RTO back to 3secs
* from the more aggressive 1sec to avoid more spurious
* retransmission.
*/
- tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
+ tp->rttvar_us = jiffies_to_usecs(TCP_TIMEOUT_FALLBACK);
+ tp->mdev_us = tp->mdev_max_us = tp->rttvar_us;
+
inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
}
/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
@@ -809,10 +808,26 @@ static int tcp_metrics_fill_info(struct sk_buff *msg,
nest = nla_nest_start(msg, TCP_METRICS_ATTR_VALS);
if (!nest)
goto nla_put_failure;
- for (i = 0; i < TCP_METRIC_MAX + 1; i++) {
- if (!tm->tcpm_vals[i])
+ for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) {
+ u32 val = tm->tcpm_vals[i];
+
+ if (!val)
continue;
- if (nla_put_u32(msg, i + 1, tm->tcpm_vals[i]) < 0)
+ if (i == TCP_METRIC_RTT) {
+ if (nla_put_u32(msg, TCP_METRIC_RTT_US + 1,
+ val) < 0)
+ goto nla_put_failure;
+ n++;
+ val = max(val / 1000, 1U);
+ }
+ if (i == TCP_METRIC_RTTVAR) {
+ if (nla_put_u32(msg, TCP_METRIC_RTTVAR_US + 1,
+ val) < 0)
+ goto nla_put_failure;
+ n++;
+ val = max(val / 1000, 1U);
+ }
+ if (nla_put_u32(msg, i + 1, val) < 0)
goto nla_put_failure;
n++;
}
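The tcp_metrics netlink change above reports RTT and RTTVAR twice: verbatim in microseconds under the new *_US attributes, and rounded to milliseconds (clamped to at least 1) under the legacy attributes. A minimal model of the legacy conversion, pure arithmetic with no netlink involved:

#include <stdio.h>

/* A metric kept in microseconds is also reported in milliseconds,
 * clamped to >= 1 so a small-but-nonzero RTT never reads back as zero.
 */
static unsigned int legacy_ms(unsigned int val_us)
{
	unsigned int ms = val_us / 1000;

	return ms ? ms : 1;
}

int main(void)
{
	printf("%u\n", legacy_ms(250));     /* 250 us -> 1 ms (clamped) */
	printf("%u\n", legacy_ms(40000));   /* 40 ms */
	return 0;
}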
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 7a436c5..ca788ad 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -398,8 +398,8 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
tcp_init_wl(newtp, treq->rcv_isn);
- newtp->srtt = 0;
- newtp->mdev = TCP_TIMEOUT_INIT;
+ newtp->srtt_us = 0;
+ newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
newtp->packets_out = 0;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f0eb4e3..5286228 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -86,6 +86,9 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
tcp_rearm_rto(sk);
}
+
+ NET_ADD_STATS_BH(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
+ tcp_skb_pcount(skb));
}
/* SND.NXT, if window was not shrunk.
@@ -269,6 +272,7 @@ EXPORT_SYMBOL(tcp_select_initial_window);
static u16 tcp_select_window(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
+ u32 old_win = tp->rcv_wnd;
u32 cur_win = tcp_receive_window(tp);
u32 new_win = __tcp_select_window(sk);
@@ -281,6 +285,9 @@ static u16 tcp_select_window(struct sock *sk)
*
* Relax Will Robinson.
*/
+ if (new_win == 0)
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPWANTZEROWINDOWADV);
new_win = ALIGN(cur_win, 1 << tp->rx_opt.rcv_wscale);
}
tp->rcv_wnd = new_win;
@@ -298,8 +305,14 @@ static u16 tcp_select_window(struct sock *sk)
new_win >>= tp->rx_opt.rcv_wscale;
/* If we advertise zero window, disable fast path. */
- if (new_win == 0)
+ if (new_win == 0) {
tp->pred_flags = 0;
+ if (old_win)
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPTOZEROWINDOWADV);
+ } else if (old_win == 0) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFROMZEROWINDOWADV);
+ }
return new_win;
}
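The window-selection hunks above add counters for wanting, entering and leaving a zero-window advertisement; note that the to/from counters bump only on the transition edge, not on every zero advertisement. A standalone model of that accounting (counter names are local to this sketch):

#include <stdio.h>

struct zw_stats {
	unsigned long to_zero;    /* old_win != 0, new_win == 0 */
	unsigned long from_zero;  /* old_win == 0, new_win != 0 */
};

static void account_window(struct zw_stats *st,
			   unsigned int old_win, unsigned int new_win)
{
	if (new_win == 0) {
		if (old_win)
			st->to_zero++;
	} else if (old_win == 0) {
		st->from_zero++;
	}
}

int main(void)
{
	struct zw_stats st = { 0, 0 };

	account_window(&st, 65535, 0);  /* shrink to zero: counted once   */
	account_window(&st, 0, 0);      /* still zero: no new transition  */
	account_window(&st, 0, 32768);  /* reopen: counted as from-zero   */
	printf("to_zero=%lu from_zero=%lu\n", st.to_zero, st.from_zero);
	return 0;
}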
@@ -856,11 +869,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
if (clone_it) {
const struct sk_buff *fclone = skb + 1;
- /* If congestion control is doing timestamping, we must
- * take such a timestamp before we potentially clone/copy.
- */
- if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP)
- __net_timestamp(skb);
+ skb_mstamp_get(&skb->skb_mstamp);
if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
fclone->fclone == SKB_FCLONE_CLONE))
@@ -1964,7 +1973,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
u32 timeout, tlp_time_stamp, rto_time_stamp;
- u32 rtt = tp->srtt >> 3;
+ u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
return false;
@@ -1986,7 +1995,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
/* Schedule a loss probe in 2*RTT for SACK capable connections
* in Open state, that are either limited by cwnd or application.
*/
- if (sysctl_tcp_early_retrans < 3 || !tp->srtt || !tp->packets_out ||
+ if (sysctl_tcp_early_retrans < 3 || !tp->srtt_us || !tp->packets_out ||
!tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
return false;
@@ -2071,7 +2080,6 @@ rearm_timer:
if (likely(!err))
NET_INC_STATS_BH(sock_net(sk),
LINUX_MIB_TCPLOSSPROBES);
- return;
}
/* Push out any pending frames which were held back due to
@@ -2169,7 +2177,8 @@ u32 __tcp_select_window(struct sock *sk)
*/
int mss = icsk->icsk_ack.rcv_mss;
int free_space = tcp_space(sk);
- int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
+ int allowed_space = tcp_full_space(sk);
+ int full_space = min_t(int, tp->window_clamp, allowed_space);
int window;
if (mss > full_space)
@@ -2182,7 +2191,19 @@ u32 __tcp_select_window(struct sock *sk)
tp->rcv_ssthresh = min(tp->rcv_ssthresh,
4U * tp->advmss);
- if (free_space < mss)
+ /* free_space might become our new window, make sure we don't
+ * increase it due to wscale.
+ */
+ free_space = round_down(free_space, 1 << tp->rx_opt.rcv_wscale);
+
+ /* if free space is less than mss estimate, or is below 1/16th
+ * of the maximum allowed, try to move to zero-window, else
+ * tcp_clamp_window() will grow rcv buf up to tcp_rmem[2], and
+ * new incoming data is dropped due to memory limits.
+ * With large window, mss test triggers way too late in order
+ * to announce zero window in time before rmem limit kicks in.
+ */
+ if (free_space < (allowed_space >> 4) || free_space < mss)
return 0;
}
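The __tcp_select_window() hunk above advertises a zero window once free space, rounded down to the window-scale granularity, drops below either the MSS estimate or 1/16 of the maximum allowed space. A standalone sketch with illustrative numbers:

#include <stdio.h>

/* free space is rounded down to the window-scale granule, then compared
 * against both the MSS and 1/16 of the allowed space.
 */
static int should_advertise_zero(int free_space, int allowed_space,
				 int mss, int rcv_wscale)
{
	free_space &= ~((1 << rcv_wscale) - 1);   /* round_down to granule */
	return free_space < (allowed_space >> 4) || free_space < mss;
}

int main(void)
{
	/* 4 MB receive space with wscale 7: the 1/16th test (256 KB)
	 * fires long before free space falls under one 1460-byte MSS.
	 */
	printf("%d\n", should_advertise_zero(200 * 1024, 4 * 1024 * 1024,
					     1460, 7));   /* 1: go to zero */
	printf("%d\n", should_advertise_zero(512 * 1024, 4 * 1024 * 1024,
					     1460, 7));   /* 0: keep open  */
	return 0;
}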
@@ -2420,7 +2441,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
if (err == 0) {
/* Update global TCP statistics. */
TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
-
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
tp->total_retrans++;
#if FASTRETRANS_DEBUG > 0
@@ -2950,7 +2972,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
if (tcp_transmit_skb(sk, syn_data, 0, sk->sk_allocation) == 0) {
tp->syn_data = (fo->copied > 0);
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENACTIVE);
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT);
goto done;
}
syn_data = NULL;
@@ -3038,8 +3060,9 @@ void tcp_send_delayed_ack(struct sock *sk)
* Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
* directly.
*/
- if (tp->srtt) {
- int rtt = max(tp->srtt >> 3, TCP_DELACK_MIN);
+ if (tp->srtt_us) {
+ int rtt = max_t(int, usecs_to_jiffies(tp->srtt_us >> 3),
+ TCP_DELACK_MIN);
if (rtt < max_ato)
max_ato = rtt;
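The delayed-ACK hunk above converts the usec-resolution srtt back into jiffies before clamping. A standalone sketch of that conversion; HZ and the minimum delayed-ACK value here are illustrative, not the kernel's:

#include <stdio.h>

#define HZ             1000U
#define DELACK_MIN_J   (HZ / 25)   /* illustrative, ~40 ms */

/* round-up usec -> jiffies conversion, modelling usecs_to_jiffies() */
static unsigned int usecs_to_jiffies_model(unsigned int us)
{
	return (us * HZ + 999999U) / 1000000U;
}

/* srtt_us8 is srtt scaled by 8, in microseconds */
static unsigned int delack_rtt_jiffies(unsigned int srtt_us8)
{
	unsigned int rtt = usecs_to_jiffies_model(srtt_us8 >> 3);

	return rtt < DELACK_MIN_J ? DELACK_MIN_J : rtt;
}

int main(void)
{
	printf("%u\n", delack_rtt_jiffies(8 * 2000));    /* 2 ms rtt: clamped  */
	printf("%u\n", delack_rtt_jiffies(8 * 120000));  /* 120 ms rtt         */
	return 0;
}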
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 1f2d376..3b66610 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -154,7 +154,7 @@ static void jtcp_rcv_established(struct sock *sk, struct sk_buff *skb,
p->snd_wnd = tp->snd_wnd;
p->rcv_wnd = tp->rcv_wnd;
p->ssthresh = tcp_current_ssthresh(sk);
- p->srtt = tp->srtt >> 3;
+ p->srtt = tp->srtt_us >> 3;
tcp_probe.head = (tcp_probe.head + 1) & (bufsize - 1);
}
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 19ea6c2..0ac5083 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -39,7 +39,6 @@ static u32 tcp_scalable_ssthresh(struct sock *sk)
static struct tcp_congestion_ops tcp_scalable __read_mostly = {
.ssthresh = tcp_scalable_ssthresh,
.cong_avoid = tcp_scalable_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
.owner = THIS_MODULE,
.name = "scalable",
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 64f0354..286227a 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -165,6 +165,9 @@ static int tcp_write_timeout(struct sock *sk)
dst_negative_advice(sk);
if (tp->syn_fastopen || tp->syn_data)
tcp_fastopen_cache_set(sk, 0, NULL, true);
+ if (tp->syn_data)
+ NET_INC_STATS_BH(sock_net(sk),
+ LINUX_MIB_TCPFASTOPENACTIVEFAIL);
}
retry_until = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
syn_set = true;
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 06cae62..48539ff 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -306,11 +306,9 @@ void tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
static struct tcp_congestion_ops tcp_vegas __read_mostly = {
- .flags = TCP_CONG_RTT_STAMP,
.init = tcp_vegas_init,
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_vegas_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
.pkts_acked = tcp_vegas_pkts_acked,
.set_state = tcp_vegas_state,
.cwnd_event = tcp_vegas_cwnd_event,
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 326475a..1b8e28f 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -203,7 +203,6 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
}
static struct tcp_congestion_ops tcp_veno __read_mostly = {
- .flags = TCP_CONG_RTT_STAMP,
.init = tcp_veno_init,
.ssthresh = tcp_veno_ssthresh,
.cong_avoid = tcp_veno_cong_avoid,
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 76a1e23..b94a04a 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -276,7 +276,6 @@ static struct tcp_congestion_ops tcp_westwood __read_mostly = {
.init = tcp_westwood_init,
.ssthresh = tcp_reno_ssthresh,
.cong_avoid = tcp_reno_cong_avoid,
- .min_cwnd = tcp_westwood_bw_rttmin,
.cwnd_event = tcp_westwood_event,
.get_info = tcp_westwood_info,
.pkts_acked = tcp_westwood_pkts_acked,
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 1a8d271..5ede0e7 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -227,11 +227,9 @@ static u32 tcp_yeah_ssthresh(struct sock *sk) {
}
static struct tcp_congestion_ops tcp_yeah __read_mostly = {
- .flags = TCP_CONG_RTT_STAMP,
.init = tcp_yeah_init,
.ssthresh = tcp_yeah_ssthresh,
.cong_avoid = tcp_yeah_cong_avoid,
- .min_cwnd = tcp_reno_min_cwnd,
.set_state = tcp_vegas_state,
.cwnd_event = tcp_vegas_cwnd_event,
.get_info = tcp_vegas_get_info,
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 77bd16f..4468e1a 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -931,7 +931,8 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
sock_tx_timestamp(sk, &ipc.tx_flags);
if (msg->msg_controllen) {
- err = ip_cmsg_send(sock_net(sk), msg, &ipc);
+ err = ip_cmsg_send(sock_net(sk), msg, &ipc,
+ sk->sk_family == AF_INET6);
if (err)
return err;
if (ipc.opt)
diff --git a/net/ipv4/xfrm4_input.c b/net/ipv4/xfrm4_input.c
index 1f12c8b..aac6197 100644
--- a/net/ipv4/xfrm4_input.c
+++ b/net/ipv4/xfrm4_input.c
@@ -37,15 +37,6 @@ drop:
return NET_RX_DROP;
}
-int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
- int encap_type)
-{
- XFRM_SPI_SKB_CB(skb)->family = AF_INET;
- XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
- return xfrm_input(skb, nexthdr, spi, encap_type);
-}
-EXPORT_SYMBOL(xfrm4_rcv_encap);
-
int xfrm4_transport_finish(struct sk_buff *skb, int async)
{
struct iphdr *iph = ip_hdr(skb);
diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c
index 31b1815..05f2b48 100644
--- a/net/ipv4/xfrm4_mode_tunnel.c
+++ b/net/ipv4/xfrm4_mode_tunnel.c
@@ -15,65 +15,6 @@
#include <net/ip.h>
#include <net/xfrm.h>
-/* Informational hook. The decap is still done here. */
-static struct xfrm_tunnel_notifier __rcu *rcv_notify_handlers __read_mostly;
-static DEFINE_MUTEX(xfrm4_mode_tunnel_input_mutex);
-
-int xfrm4_mode_tunnel_input_register(struct xfrm_tunnel_notifier *handler)
-{
- struct xfrm_tunnel_notifier __rcu **pprev;
- struct xfrm_tunnel_notifier *t;
- int ret = -EEXIST;
- int priority = handler->priority;
-
- mutex_lock(&xfrm4_mode_tunnel_input_mutex);
-
- for (pprev = &rcv_notify_handlers;
- (t = rcu_dereference_protected(*pprev,
- lockdep_is_held(&xfrm4_mode_tunnel_input_mutex))) != NULL;
- pprev = &t->next) {
- if (t->priority > priority)
- break;
- if (t->priority == priority)
- goto err;
-
- }
-
- handler->next = *pprev;
- rcu_assign_pointer(*pprev, handler);
-
- ret = 0;
-
-err:
- mutex_unlock(&xfrm4_mode_tunnel_input_mutex);
- return ret;
-}
-EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_register);
-
-int xfrm4_mode_tunnel_input_deregister(struct xfrm_tunnel_notifier *handler)
-{
- struct xfrm_tunnel_notifier __rcu **pprev;
- struct xfrm_tunnel_notifier *t;
- int ret = -ENOENT;
-
- mutex_lock(&xfrm4_mode_tunnel_input_mutex);
- for (pprev = &rcv_notify_handlers;
- (t = rcu_dereference_protected(*pprev,
- lockdep_is_held(&xfrm4_mode_tunnel_input_mutex))) != NULL;
- pprev = &t->next) {
- if (t == handler) {
- *pprev = handler->next;
- ret = 0;
- break;
- }
- }
- mutex_unlock(&xfrm4_mode_tunnel_input_mutex);
- synchronize_net();
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(xfrm4_mode_tunnel_input_deregister);
-
static inline void ipip_ecn_decapsulate(struct sk_buff *skb)
{
struct iphdr *inner_iph = ipip_hdr(skb);
@@ -127,14 +68,8 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb)
return 0;
}
-#define for_each_input_rcu(head, handler) \
- for (handler = rcu_dereference(head); \
- handler != NULL; \
- handler = rcu_dereference(handler->next))
-
static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
{
- struct xfrm_tunnel_notifier *handler;
int err = -EINVAL;
if (XFRM_MODE_SKB_CB(skb)->protocol != IPPROTO_IPIP)
@@ -143,9 +78,6 @@ static int xfrm4_mode_tunnel_input(struct xfrm_state *x, struct sk_buff *skb)
if (!pskb_may_pull(skb, sizeof(struct iphdr)))
goto out;
- for_each_input_rcu(rcv_notify_handlers, handler)
- handler->handler(skb);
-
err = skb_unclone(skb, GFP_ATOMIC);
if (err)
goto out;
diff --git a/net/ipv4/xfrm4_protocol.c b/net/ipv4/xfrm4_protocol.c
new file mode 100644
index 0000000..cdc09ef
--- /dev/null
+++ b/net/ipv4/xfrm4_protocol.c
@@ -0,0 +1,275 @@
+/* xfrm4_protocol.c - Generic xfrm protocol multiplexer.
+ *
+ * Copyright (C) 2013 secunet Security Networks AG
+ *
+ * Author:
+ * Steffen Klassert <steffen.klassert@secunet.com>
+ *
+ * Based on:
+ * net/ipv4/tunnel4.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/skbuff.h>
+#include <net/icmp.h>
+#include <net/ip.h>
+#include <net/protocol.h>
+#include <net/xfrm.h>
+
+static struct xfrm4_protocol __rcu *esp4_handlers __read_mostly;
+static struct xfrm4_protocol __rcu *ah4_handlers __read_mostly;
+static struct xfrm4_protocol __rcu *ipcomp4_handlers __read_mostly;
+static DEFINE_MUTEX(xfrm4_protocol_mutex);
+
+static inline struct xfrm4_protocol __rcu **proto_handlers(u8 protocol)
+{
+ switch (protocol) {
+ case IPPROTO_ESP:
+ return &esp4_handlers;
+ case IPPROTO_AH:
+ return &ah4_handlers;
+ case IPPROTO_COMP:
+ return &ipcomp4_handlers;
+ }
+
+ return NULL;
+}
+
+#define for_each_protocol_rcu(head, handler) \
+ for (handler = rcu_dereference(head); \
+ handler != NULL; \
+ handler = rcu_dereference(handler->next))
+
+int xfrm4_rcv_cb(struct sk_buff *skb, u8 protocol, int err)
+{
+ int ret;
+ struct xfrm4_protocol *handler;
+
+ for_each_protocol_rcu(*proto_handlers(protocol), handler)
+ if ((ret = handler->cb_handler(skb, err)) <= 0)
+ return ret;
+
+ return 0;
+}
+EXPORT_SYMBOL(xfrm4_rcv_cb);
+
+int xfrm4_rcv_encap(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type)
+{
+ int ret;
+ struct xfrm4_protocol *handler;
+
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+ XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+
+ for_each_protocol_rcu(*proto_handlers(nexthdr), handler)
+ if ((ret = handler->input_handler(skb, nexthdr, spi, encap_type)) != -EINVAL)
+ return ret;
+
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
+ kfree_skb(skb);
+ return 0;
+}
+EXPORT_SYMBOL(xfrm4_rcv_encap);
+
+static int xfrm4_esp_rcv(struct sk_buff *skb)
+{
+ int ret;
+ struct xfrm4_protocol *handler;
+
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+
+ for_each_protocol_rcu(esp4_handlers, handler)
+ if ((ret = handler->handler(skb)) != -EINVAL)
+ return ret;
+
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static void xfrm4_esp_err(struct sk_buff *skb, u32 info)
+{
+ struct xfrm4_protocol *handler;
+
+ for_each_protocol_rcu(esp4_handlers, handler)
+ if (!handler->err_handler(skb, info))
+ break;
+}
+
+static int xfrm4_ah_rcv(struct sk_buff *skb)
+{
+ int ret;
+ struct xfrm4_protocol *handler;
+
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+
+ for_each_protocol_rcu(ah4_handlers, handler)
+ if ((ret = handler->handler(skb)) != -EINVAL)
+ return ret;
+
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static void xfrm4_ah_err(struct sk_buff *skb, u32 info)
+{
+ struct xfrm4_protocol *handler;
+
+ for_each_protocol_rcu(ah4_handlers, handler)
+ if (!handler->err_handler(skb, info))
+ break;
+}
+
+static int xfrm4_ipcomp_rcv(struct sk_buff *skb)
+{
+ int ret;
+ struct xfrm4_protocol *handler;
+
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = NULL;
+
+ for_each_protocol_rcu(ipcomp4_handlers, handler)
+ if ((ret = handler->handler(skb)) != -EINVAL)
+ return ret;
+
+ icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
+
+ kfree_skb(skb);
+ return 0;
+}
+
+static void xfrm4_ipcomp_err(struct sk_buff *skb, u32 info)
+{
+ struct xfrm4_protocol *handler;
+
+ for_each_protocol_rcu(ipcomp4_handlers, handler)
+ if (!handler->err_handler(skb, info))
+ break;
+}
+
+static const struct net_protocol esp4_protocol = {
+ .handler = xfrm4_esp_rcv,
+ .err_handler = xfrm4_esp_err,
+ .no_policy = 1,
+ .netns_ok = 1,
+};
+
+static const struct net_protocol ah4_protocol = {
+ .handler = xfrm4_ah_rcv,
+ .err_handler = xfrm4_ah_err,
+ .no_policy = 1,
+ .netns_ok = 1,
+};
+
+static const struct net_protocol ipcomp4_protocol = {
+ .handler = xfrm4_ipcomp_rcv,
+ .err_handler = xfrm4_ipcomp_err,
+ .no_policy = 1,
+ .netns_ok = 1,
+};
+
+static inline const struct net_protocol *netproto(unsigned char protocol)
+{
+ switch (protocol) {
+ case IPPROTO_ESP:
+ return &esp4_protocol;
+ case IPPROTO_AH:
+ return &ah4_protocol;
+ case IPPROTO_COMP:
+ return &ipcomp4_protocol;
+ }
+
+ return NULL;
+}
+
+int xfrm4_protocol_register(struct xfrm4_protocol *handler,
+ unsigned char protocol)
+{
+ struct xfrm4_protocol __rcu **pprev;
+ struct xfrm4_protocol *t;
+ bool add_netproto = false;
+
+ int ret = -EEXIST;
+ int priority = handler->priority;
+
+ mutex_lock(&xfrm4_protocol_mutex);
+
+ if (!rcu_dereference_protected(*proto_handlers(protocol),
+ lockdep_is_held(&xfrm4_protocol_mutex)))
+ add_netproto = true;
+
+ for (pprev = proto_handlers(protocol);
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&xfrm4_protocol_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t->priority < priority)
+ break;
+ if (t->priority == priority)
+ goto err;
+ }
+
+ handler->next = *pprev;
+ rcu_assign_pointer(*pprev, handler);
+
+ ret = 0;
+
+err:
+ mutex_unlock(&xfrm4_protocol_mutex);
+
+ if (add_netproto) {
+ if (inet_add_protocol(netproto(protocol), protocol)) {
+ pr_err("%s: can't add protocol\n", __func__);
+ ret = -EAGAIN;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(xfrm4_protocol_register);
+
+int xfrm4_protocol_deregister(struct xfrm4_protocol *handler,
+ unsigned char protocol)
+{
+ struct xfrm4_protocol __rcu **pprev;
+ struct xfrm4_protocol *t;
+ int ret = -ENOENT;
+
+ mutex_lock(&xfrm4_protocol_mutex);
+
+ for (pprev = proto_handlers(protocol);
+ (t = rcu_dereference_protected(*pprev,
+ lockdep_is_held(&xfrm4_protocol_mutex))) != NULL;
+ pprev = &t->next) {
+ if (t == handler) {
+ *pprev = handler->next;
+ ret = 0;
+ break;
+ }
+ }
+
+ if (!rcu_dereference_protected(*proto_handlers(protocol),
+ lockdep_is_held(&xfrm4_protocol_mutex))) {
+ if (inet_del_protocol(netproto(protocol), protocol) < 0) {
+ pr_err("%s: can't remove protocol\n", __func__);
+ ret = -EAGAIN;
+ }
+ }
+
+ mutex_unlock(&xfrm4_protocol_mutex);
+
+ synchronize_net();
+
+ return ret;
+}
+EXPORT_SYMBOL(xfrm4_protocol_deregister);
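The new xfrm4_protocol.c above keeps one priority-ordered, RCU-protected handler list per IPsec protocol; registration links a handler in before the first entry with a lower priority (highest priority first) and rejects duplicates. A userspace model of that insertion; the struct and names are local to this sketch, not kernel API:

#include <stdio.h>
#include <errno.h>

struct proto_handler {
	struct proto_handler *next;
	int priority;
	const char *name;
};

static int register_handler(struct proto_handler **head,
			    struct proto_handler *h)
{
	struct proto_handler **pprev, *t;

	for (pprev = head; (t = *pprev) != NULL; pprev = &t->next) {
		if (t->priority < h->priority)
			break;                   /* insert before lower prio */
		if (t->priority == h->priority)
			return -EEXIST;          /* slot already taken */
	}
	h->next = *pprev;
	*pprev = h;
	return 0;
}

int main(void)
{
	struct proto_handler *head = NULL;
	struct proto_handler a = { NULL, 128, "esp-offload" };
	struct proto_handler b = { NULL, 0,   "esp-generic" };
	struct proto_handler *t;

	register_handler(&head, &b);
	register_handler(&head, &a);
	for (t = head; t; t = t->next)
		printf("%s (prio %d)\n", t->name, t->priority);
	return 0;
}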
diff --git a/net/ipv6/addrlabel.c b/net/ipv6/addrlabel.c
index b30ad37..731e1e1 100644
--- a/net/ipv6/addrlabel.c
+++ b/net/ipv6/addrlabel.c
@@ -6,7 +6,7 @@
*/
/*
* Author:
- * YOSHIFUJI Hideaki @ USAGI/WIDE Project <yoshfuji@linux-ipv6.org>
+ * YOSHIFUJI Hideaki @ USAGI/WIDE Project <yoshfuji@linux-ipv6.org>
*/
#include <linux/kernel.h>
@@ -22,14 +22,13 @@
#if 0
#define ADDRLABEL(x...) printk(x)
#else
-#define ADDRLABEL(x...) do { ; } while(0)
+#define ADDRLABEL(x...) do { ; } while (0)
#endif
/*
* Policy Table
*/
-struct ip6addrlbl_entry
-{
+struct ip6addrlbl_entry {
#ifdef CONFIG_NET_NS
struct net *lbl_net;
#endif
@@ -88,39 +87,39 @@ static const __net_initconst struct ip6addrlbl_init_table
{ /* ::/0 */
.prefix = &in6addr_any,
.label = 1,
- },{ /* fc00::/7 */
- .prefix = &(struct in6_addr){{{ 0xfc }}},
+ }, { /* fc00::/7 */
+ .prefix = &(struct in6_addr){ { { 0xfc } } },
.prefixlen = 7,
.label = 5,
- },{ /* fec0::/10 */
- .prefix = &(struct in6_addr){{{ 0xfe, 0xc0 }}},
+ }, { /* fec0::/10 */
+ .prefix = &(struct in6_addr){ { { 0xfe, 0xc0 } } },
.prefixlen = 10,
.label = 11,
- },{ /* 2002::/16 */
- .prefix = &(struct in6_addr){{{ 0x20, 0x02 }}},
+ }, { /* 2002::/16 */
+ .prefix = &(struct in6_addr){ { { 0x20, 0x02 } } },
.prefixlen = 16,
.label = 2,
- },{ /* 3ffe::/16 */
- .prefix = &(struct in6_addr){{{ 0x3f, 0xfe }}},
+ }, { /* 3ffe::/16 */
+ .prefix = &(struct in6_addr){ { { 0x3f, 0xfe } } },
.prefixlen = 16,
.label = 12,
- },{ /* 2001::/32 */
- .prefix = &(struct in6_addr){{{ 0x20, 0x01 }}},
+ }, { /* 2001::/32 */
+ .prefix = &(struct in6_addr){ { { 0x20, 0x01 } } },
.prefixlen = 32,
.label = 6,
- },{ /* 2001:10::/28 */
- .prefix = &(struct in6_addr){{{ 0x20, 0x01, 0x00, 0x10 }}},
+ }, { /* 2001:10::/28 */
+ .prefix = &(struct in6_addr){ { { 0x20, 0x01, 0x00, 0x10 } } },
.prefixlen = 28,
.label = 7,
- },{ /* ::ffff:0:0 */
- .prefix = &(struct in6_addr){{{ [10] = 0xff, [11] = 0xff }}},
+ }, { /* ::ffff:0:0 */
+ .prefix = &(struct in6_addr){ { { [10] = 0xff, [11] = 0xff } } },
.prefixlen = 96,
.label = 4,
- },{ /* ::/96 */
+ }, { /* ::/96 */
.prefix = &in6addr_any,
.prefixlen = 96,
.label = 3,
- },{ /* ::1/128 */
+ }, { /* ::1/128 */
.prefix = &in6addr_loopback,
.prefixlen = 128,
.label = 0,
@@ -441,7 +440,7 @@ static int ip6addrlbl_newdel(struct sk_buff *skb, struct nlmsghdr *nlh)
if (label == IPV6_ADDR_LABEL_DEFAULT)
return -EINVAL;
- switch(nlh->nlmsg_type) {
+ switch (nlh->nlmsg_type) {
case RTM_NEWADDRLABEL:
if (ifal->ifal_index &&
!__dev_get_by_index(net, ifal->ifal_index))
@@ -505,12 +504,13 @@ static int ip6addrlbl_dump(struct sk_buff *skb, struct netlink_callback *cb)
hlist_for_each_entry_rcu(p, &ip6addrlbl_table.head, list) {
if (idx >= s_idx &&
net_eq(ip6addrlbl_net(p), net)) {
- if ((err = ip6addrlbl_fill(skb, p,
- ip6addrlbl_table.seq,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- RTM_NEWADDRLABEL,
- NLM_F_MULTI)) <= 0)
+ err = ip6addrlbl_fill(skb, p,
+ ip6addrlbl_table.seq,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ RTM_NEWADDRLABEL,
+ NLM_F_MULTI);
+ if (err <= 0)
break;
}
idx++;
@@ -527,7 +527,7 @@ static inline int ip6addrlbl_msgsize(void)
+ nla_total_size(4); /* IFAL_LABEL */
}
-static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh)
+static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(in_skb->sk);
struct ifaddrlblmsg *ifal;
@@ -568,7 +568,8 @@ static int ip6addrlbl_get(struct sk_buff *in_skb, struct nlmsghdr* nlh)
goto out;
}
- if (!(skb = nlmsg_new(ip6addrlbl_msgsize(), GFP_KERNEL))) {
+ skb = nlmsg_new(ip6addrlbl_msgsize(), GFP_KERNEL);
+ if (!skb) {
ip6addrlbl_put(p);
return -ENOBUFS;
}
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 81e496a..6c5f094 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -346,6 +346,10 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
struct ip_auth_hdr *ah;
struct ah_data *ahp;
struct tmp_ext *iph_ext;
+ int seqhi_len = 0;
+ __be32 *seqhi;
+ int sglists = 0;
+ struct scatterlist *seqhisg;
ahp = x->data;
ahash = ahp->ahash;
@@ -359,15 +363,22 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
if (extlen)
extlen += sizeof(*iph_ext);
+ if (x->props.flags & XFRM_STATE_ESN) {
+ sglists = 1;
+ seqhi_len = sizeof(*seqhi);
+ }
err = -ENOMEM;
- iph_base = ah_alloc_tmp(ahash, nfrags, IPV6HDR_BASELEN + extlen);
+ iph_base = ah_alloc_tmp(ahash, nfrags + sglists, IPV6HDR_BASELEN +
+ extlen + seqhi_len);
if (!iph_base)
goto out;
iph_ext = ah_tmp_ext(iph_base);
- icv = ah_tmp_icv(ahash, iph_ext, extlen);
+ seqhi = (__be32 *)((char *)iph_ext + extlen);
+ icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
req = ah_tmp_req(ahash, icv);
sg = ah_req_sg(ahash, req);
+ seqhisg = sg + nfrags;
ah = ip_auth_hdr(skb);
memset(ah->auth_data, 0, ahp->icv_trunc_len);
@@ -411,10 +422,15 @@ static int ah6_output(struct xfrm_state *x, struct sk_buff *skb)
ah->spi = x->id.spi;
ah->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
- sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ sg_init_table(sg, nfrags + sglists);
+ skb_to_sgvec_nomark(skb, sg, 0, skb->len);
- ahash_request_set_crypt(req, sg, icv, skb->len);
+ if (x->props.flags & XFRM_STATE_ESN) {
+ /* Attach seqhi sg right after packet payload */
+ *seqhi = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+ sg_set_buf(seqhisg, seqhi, seqhi_len);
+ }
+ ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
ahash_request_set_callback(req, 0, ah6_output_done, skb);
AH_SKB_CB(skb)->tmp = iph_base;
@@ -514,6 +530,10 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
int nexthdr;
int nfrags;
int err = -ENOMEM;
+ int seqhi_len = 0;
+ __be32 *seqhi;
+ int sglists = 0;
+ struct scatterlist *seqhisg;
if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr)))
goto out;
@@ -550,14 +570,22 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
skb_push(skb, hdr_len);
- work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
+ if (x->props.flags & XFRM_STATE_ESN) {
+ sglists = 1;
+ seqhi_len = sizeof(*seqhi);
+ }
+
+ work_iph = ah_alloc_tmp(ahash, nfrags + sglists, hdr_len +
+ ahp->icv_trunc_len + seqhi_len);
if (!work_iph)
goto out;
- auth_data = ah_tmp_auth(work_iph, hdr_len);
- icv = ah_tmp_icv(ahash, auth_data, ahp->icv_trunc_len);
+ auth_data = ah_tmp_auth((u8 *)work_iph, hdr_len);
+ seqhi = (__be32 *)(auth_data + ahp->icv_trunc_len);
+ icv = ah_tmp_icv(ahash, seqhi, seqhi_len);
req = ah_tmp_req(ahash, icv);
sg = ah_req_sg(ahash, req);
+ seqhisg = sg + nfrags;
memcpy(work_iph, ip6h, hdr_len);
memcpy(auth_data, ah->auth_data, ahp->icv_trunc_len);
@@ -572,10 +600,16 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
ip6h->flow_lbl[2] = 0;
ip6h->hop_limit = 0;
- sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ sg_init_table(sg, nfrags + sglists);
+ skb_to_sgvec_nomark(skb, sg, 0, skb->len);
+
+ if (x->props.flags & XFRM_STATE_ESN) {
+ /* Attach seqhi sg right after packet payload */
+ *seqhi = XFRM_SKB_CB(skb)->seq.input.hi;
+ sg_set_buf(seqhisg, seqhi, seqhi_len);
+ }
- ahash_request_set_crypt(req, sg, icv, skb->len);
+ ahash_request_set_crypt(req, sg, icv, skb->len + seqhi_len);
ahash_request_set_callback(req, 0, ah6_input_done, skb);
AH_SKB_CB(skb)->tmp = work_iph;
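The ah6 ESN hunks above append the high 32 bits of the sequence counter after the payload in the scatterlist so the ICV covers them on both output and input. A toy standalone model of hashing payload plus appended seqhi; the checksum stands in for the real AH transform and the sequence value is illustrative:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* trivial digest standing in for the real keyed hash */
static uint32_t toy_icv(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum = sum * 31 + buf[i];
	return sum;
}

int main(void)
{
	uint8_t pkt[64 + 4] = "payload bytes ...";
	size_t payload_len = 17;
	uint64_t esn_seq = 0x00000002ffffffffULL;   /* illustrative ESN */
	uint32_t seqhi = htonl((uint32_t)(esn_seq >> 32));

	memcpy(pkt + payload_len, &seqhi, sizeof(seqhi));   /* append seqhi */
	printf("icv over %zu bytes: %#x\n", payload_len + sizeof(seqhi),
	       (unsigned int)toy_icv(pkt, payload_len + sizeof(seqhi)));
	return 0;
}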
diff --git a/net/ipv6/ip6_checksum.c b/net/ipv6/ip6_checksum.c
index 72d198b..ee7a97f 100644
--- a/net/ipv6/ip6_checksum.c
+++ b/net/ipv6/ip6_checksum.c
@@ -79,7 +79,9 @@ int udp6_csum_init(struct sk_buff *skb, struct udphdr *uh, int proto)
/* RFC 2460 section 8.1 says that we SHOULD log
this error. Well, it is reasonable.
*/
- LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0\n");
+ LIMIT_NETDEBUG(KERN_INFO "IPv6: udp checksum is 0 for [%pI6c]:%u->[%pI6c]:%u\n",
+ &ipv6_hdr(skb)->saddr, ntohs(uh->source),
+ &ipv6_hdr(skb)->daddr, ntohs(uh->dest));
return 1;
}
if (skb->ip_summed == CHECKSUM_COMPLETE &&
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index dfa41bb..0961b5e 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -15,9 +15,7 @@
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
-#include <linux/if_arp.h>
#include <linux/in6.h>
-#include <linux/route.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -28,12 +26,8 @@
#include <net/sock.h>
#include <net/ipv6.h>
-#include <net/ndisc.h>
-#include <net/protocol.h>
-#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
-#include <net/icmp.h>
#include <net/transp_v6.h>
#include <asm/uaccess.h>
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index f3ffb43..c98338b 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1454,7 +1454,6 @@ static void ip6gre_netlink_parms(struct nlattr *data[],
static int ip6gre_tap_init(struct net_device *dev)
{
struct ip6_tnl *tunnel;
- int i;
tunnel = netdev_priv(dev);
@@ -1464,16 +1463,10 @@ static int ip6gre_tap_init(struct net_device *dev)
ip6gre_tnl_link_config(tunnel, 1);
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *ip6gre_tap_stats;
- ip6gre_tap_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&ip6gre_tap_stats->syncp);
- }
-
return 0;
}
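This and the similar hunks below (ip6_tunnel, ip6_vti, sit) replace the open-coded alloc_percpu() plus per-CPU u64_stats_init() loop with netdev_alloc_pcpu_stats(). A plain-C sketch of the shape of that replacement, assuming a fixed CPU count; this is not the kernel macro itself:

#include <stdlib.h>

#define NR_CPUS 8   /* illustrative */

struct sw_netstats {
	unsigned long long rx_packets, rx_bytes, tx_packets, tx_bytes;
	unsigned int syncp;              /* stand-in for u64_stats_sync */
};

/* allocate one stats block per CPU and initialize each block's sync
 * member in the same pass, instead of a separate for_each_possible_cpu()
 * loop at every call site
 */
static struct sw_netstats *alloc_pcpu_stats(void)
{
	struct sw_netstats *stats = calloc(NR_CPUS, sizeof(*stats));
	int cpu;

	if (!stats)
		return NULL;
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		stats[cpu].syncp = 0;    /* u64_stats_init() equivalent */
	return stats;
}

int main(void)
{
	struct sw_netstats *s = alloc_pcpu_stats();

	free(s);
	return 0;
}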
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 16f91a2..2bc1070 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1231,8 +1231,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
sizeof(struct frag_hdr) : 0) +
rt->rt6i_nfheader_len;
- maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ?
- mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
+ if (ip6_sk_local_df(sk))
+ maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
+ else
+ maxnonfragsize = mtu;
/* dontfrag active */
if ((cork->length + length > mtu - headersize) && dontfrag &&
@@ -1540,8 +1542,7 @@ int ip6_push_pending_frames(struct sock *sk)
}
/* Allow local fragmentation. */
- if (np->pmtudisc < IPV6_PMTUDISC_DO)
- skb->local_df = 1;
+ skb->local_df = ip6_sk_local_df(sk);
*final_dst = fl6->daddr;
__skb_pull(skb, skb_network_header_len(skb));
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 5db8d31..8ad59f4 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1502,19 +1502,12 @@ static inline int
ip6_tnl_dev_init_gen(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- int i;
t->dev = dev;
t->net = dev_net(dev);
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
-
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *ip6_tnl_stats;
- ip6_tnl_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&ip6_tnl_stats->syncp);
- }
return 0;
}
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index 2d19272..8649143 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -731,18 +731,12 @@ static void vti6_dev_setup(struct net_device *dev)
static inline int vti6_dev_init_gen(struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- int i;
t->dev = dev;
t->net = dev_net(dev);
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *stats;
- stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&stats->syncp);
- }
return 0;
}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 0a00f44..edb58af 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -722,7 +722,7 @@ done:
case IPV6_MTU_DISCOVER:
if (optlen < sizeof(int))
goto e_inval;
- if (val < IPV6_PMTUDISC_DONT || val > IPV6_PMTUDISC_INTERFACE)
+ if (val < IPV6_PMTUDISC_DONT || val > IPV6_PMTUDISC_OMIT)
goto e_inval;
np->pmtudisc = val;
retv = 0;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index b4d74c8..1693c8d 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1363,7 +1363,6 @@ static void ipip6_tunnel_setup(struct net_device *dev)
static int ipip6_tunnel_init(struct net_device *dev)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
- int i;
tunnel->dev = dev;
tunnel->net = dev_net(dev);
@@ -1372,16 +1371,10 @@ static int ipip6_tunnel_init(struct net_device *dev)
memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
ipip6_tunnel_bind_dev(dev);
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *ipip6_tunnel_stats;
- ipip6_tunnel_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&ipip6_tunnel_stats->syncp);
- }
-
tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
if (!tunnel->dst_cache) {
free_percpu(dev->tstats);
@@ -1397,7 +1390,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
struct iphdr *iph = &tunnel->parms.iph;
struct net *net = dev_net(dev);
struct sit_net *sitn = net_generic(net, sit_net_id);
- int i;
tunnel->dev = dev;
tunnel->net = dev_net(dev);
@@ -1408,16 +1400,10 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
iph->ihl = 5;
iph->ttl = 64;
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *ipip6_fb_stats;
- ipip6_fb_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&ipip6_fb_stats->syncp);
- }
-
tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
if (!tunnel->dst_cache) {
free_percpu(dev->tstats);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 889079b..3277680 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -501,8 +501,10 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
int res;
res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0);
- if (!res)
+ if (!res) {
TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+ }
return res;
}
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 00b2a6d..41e4e93 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1368,6 +1368,7 @@ static int ipx_release(struct socket *sock)
goto out;
lock_sock(sk);
+ sk->sk_shutdown = SHUTDOWN_MASK;
if (!sock_flag(sk, SOCK_DEAD))
sk->sk_state_change(sk);
@@ -1791,8 +1792,11 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
flags & MSG_DONTWAIT, &rc);
- if (!skb)
+ if (!skb) {
+ if (rc == -EAGAIN && (sk->sk_shutdown & RCV_SHUTDOWN))
+ rc = 0;
goto out;
+ }
ipx = ipx_hdr(skb);
copied = ntohs(ipx->ipx_pktsize) - sizeof(struct ipxhdr);
@@ -1922,6 +1926,26 @@ static int ipx_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long
}
#endif
+static int ipx_shutdown(struct socket *sock, int mode)
+{
+ struct sock *sk = sock->sk;
+
+ if (mode < SHUT_RD || mode > SHUT_RDWR)
+ return -EINVAL;
+ /* This maps:
+ * SHUT_RD (0) -> RCV_SHUTDOWN (1)
+ * SHUT_WR (1) -> SEND_SHUTDOWN (2)
+ * SHUT_RDWR (2) -> SHUTDOWN_MASK (3)
+ */
+ ++mode;
+
+ lock_sock(sk);
+ sk->sk_shutdown |= mode;
+ release_sock(sk);
+ sk->sk_state_change(sk);
+
+ return 0;
+}
/*
* Socket family declarations
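The new ipx_shutdown() above maps the userspace SHUT_* values (0..2) onto the kernel's shutdown bit masks (1..3) simply by adding one. A standalone check of that mapping; the bit-mask macros are spelled out locally since they are kernel-internal:

#include <stdio.h>
#include <sys/socket.h>

#define RCV_SHUTDOWN  1
#define SEND_SHUTDOWN 2

int main(void)
{
	int mode;

	for (mode = SHUT_RD; mode <= SHUT_RDWR; mode++) {
		int mask = mode + 1;    /* the +1 done by ipx_shutdown() */

		printf("SHUT_%d -> rcv:%d send:%d\n", mode,
		       !!(mask & RCV_SHUTDOWN), !!(mask & SEND_SHUTDOWN));
	}
	return 0;
}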
@@ -1948,7 +1972,7 @@ static const struct proto_ops ipx_dgram_ops = {
.compat_ioctl = ipx_compat_ioctl,
#endif
.listen = sock_no_listen,
- .shutdown = sock_no_shutdown, /* FIXME: support shutdown */
+ .shutdown = ipx_shutdown,
.setsockopt = ipx_setsockopt,
.getsockopt = ipx_getsockopt,
.sendmsg = ipx_sendmsg,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 1a04c13..a50d979 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -365,6 +365,7 @@ static const u8 sadb_ext_min_len[] = {
[SADB_X_EXT_NAT_T_OA] = (u8) sizeof(struct sadb_address),
[SADB_X_EXT_SEC_CTX] = (u8) sizeof(struct sadb_x_sec_ctx),
[SADB_X_EXT_KMADDRESS] = (u8) sizeof(struct sadb_x_kmaddress),
+ [SADB_X_EXT_FILTER] = (u8) sizeof(struct sadb_x_filter),
};
/* Verify sadb_address_{len,prefixlen} against sa_family. */
@@ -1798,6 +1799,7 @@ static void pfkey_dump_sa_done(struct pfkey_sock *pfk)
static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_msg *hdr, void * const *ext_hdrs)
{
u8 proto;
+ struct xfrm_filter *filter = NULL;
struct pfkey_sock *pfk = pfkey_sk(sk);
if (pfk->dump.dump != NULL)
@@ -1807,11 +1809,27 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms
if (proto == 0)
return -EINVAL;
+ if (ext_hdrs[SADB_X_EXT_FILTER - 1]) {
+ struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1];
+
+ filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+ if (filter == NULL)
+ return -ENOMEM;
+
+ memcpy(&filter->saddr, &xfilter->sadb_x_filter_saddr,
+ sizeof(xfrm_address_t));
+ memcpy(&filter->daddr, &xfilter->sadb_x_filter_daddr,
+ sizeof(xfrm_address_t));
+ filter->family = xfilter->sadb_x_filter_family;
+ filter->splen = xfilter->sadb_x_filter_splen;
+ filter->dplen = xfilter->sadb_x_filter_dplen;
+ }
+
pfk->dump.msg_version = hdr->sadb_msg_version;
pfk->dump.msg_portid = hdr->sadb_msg_pid;
pfk->dump.dump = pfkey_dump_sa;
pfk->dump.done = pfkey_dump_sa_done;
- xfrm_state_walk_init(&pfk->dump.u.state, proto);
+ xfrm_state_walk_init(&pfk->dump.u.state, proto, filter);
return pfkey_do_dump(pfk);
}
@@ -3059,6 +3077,24 @@ static u32 get_acqseq(void)
return res;
}
+static bool pfkey_is_alive(const struct km_event *c)
+{
+ struct netns_pfkey *net_pfkey = net_generic(c->net, pfkey_net_id);
+ struct sock *sk;
+ bool is_alive = false;
+
+ rcu_read_lock();
+ sk_for_each_rcu(sk, &net_pfkey->table) {
+ if (pfkey_sk(sk)->registered) {
+ is_alive = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_alive;
+}
+
static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *xp)
{
struct sk_buff *skb;
@@ -3784,6 +3820,7 @@ static struct xfrm_mgr pfkeyv2_mgr =
.new_mapping = pfkey_send_new_mapping,
.notify_policy = pfkey_send_policy_notify,
.migrate = pfkey_send_migrate,
+ .is_alive = pfkey_is_alive,
};
static int __net_init pfkey_net_init(struct net *net)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 735d0f6..e5dc42f 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1809,8 +1809,6 @@ void l2tp_session_free(struct l2tp_session *session)
}
kfree(session);
-
- return;
}
EXPORT_SYMBOL_GPL(l2tp_session_free);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index be5fadf..ec40bc3 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -454,13 +454,11 @@ static void pppol2tp_session_close(struct l2tp_session *session)
BUG_ON(session->magic != L2TP_SESSION_MAGIC);
-
if (sock) {
inet_shutdown(sock, 2);
/* Don't let the session go away before our socket does */
l2tp_session_inc_refcount(session);
}
- return;
}
/* Really kill the session socket. (Called from sock_put() if
@@ -474,7 +472,6 @@ static void pppol2tp_session_destruct(struct sock *sk)
BUG_ON(session->magic != L2TP_SESSION_MAGIC);
l2tp_session_dec_refcount(session);
}
- return;
}
/* Called when the PPPoX socket (session) is closed.
diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c
index 13b7683..ce9633a 100644
--- a/net/mac80211/agg-tx.c
+++ b/net/mac80211/agg-tx.c
@@ -107,7 +107,7 @@ static void ieee80211_send_addba_request(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.addba_req.start_seq_num =
cpu_to_le16(start_seq_num << 4);
- ieee80211_tx_skb_tid(sdata, skb, tid);
+ ieee80211_tx_skb(sdata, skb);
}
void ieee80211_send_bar(struct ieee80211_vif *vif, u8 *ra, u16 tid, u16 ssn)
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 453e974..363d19b 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -451,11 +451,11 @@ void sta_set_rate_info_rx(struct sta_info *sta, struct rate_info *rinfo)
rinfo->flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
if (sta->last_rx_rate_flag & RX_FLAG_SHORT_GI)
rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI;
- if (sta->last_rx_rate_flag & RX_FLAG_80MHZ)
+ if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_80MHZ)
rinfo->flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
- if (sta->last_rx_rate_flag & RX_FLAG_80P80MHZ)
+ if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_80P80MHZ)
rinfo->flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH;
- if (sta->last_rx_rate_flag & RX_FLAG_160MHZ)
+ if (sta->last_rx_rate_vht_flag & RX_VHT_FLAG_160MHZ)
rinfo->flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
}
@@ -970,9 +970,9 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
/* TODO: make hostapd tell us what it wants */
sdata->smps_mode = IEEE80211_SMPS_OFF;
sdata->needed_rx_chains = sdata->local->rx_chains;
- sdata->radar_required = params->radar_required;
mutex_lock(&local->mtx);
+ sdata->radar_required = params->radar_required;
err = ieee80211_vif_use_channel(sdata, &params->chandef,
IEEE80211_CHANCTX_SHARED);
mutex_unlock(&local->mtx);
@@ -1056,6 +1056,7 @@ static int ieee80211_change_beacon(struct wiphy *wiphy, struct net_device *dev,
int err;
sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ sdata_assert_lock(sdata);
/* don't allow changing the beacon while CSA is in place - offset
* of channel switch counter may change
@@ -1083,6 +1084,8 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
struct probe_resp *old_probe_resp;
struct cfg80211_chan_def chandef;
+ sdata_assert_lock(sdata);
+
old_beacon = sdata_dereference(sdata->u.ap.beacon, sdata);
if (!old_beacon)
return -ENOENT;
@@ -1343,6 +1346,18 @@ static int sta_apply_parameters(struct ieee80211_local *local,
ieee80211_vht_cap_ie_to_sta_vht_cap(sdata, sband,
params->vht_capa, sta);
+ if (params->opmode_notif_used) {
+ enum ieee80211_band band =
+ ieee80211_get_sdata_band(sdata);
+
+ /* returned value is only needed for rc update, but the
+ * rc isn't initialized here yet, so ignore it
+ */
+ __ieee80211_vht_handle_opmode(sdata, sta,
+ params->opmode_notif,
+ band, false);
+ }
+
if (ieee80211_vif_is_mesh(&sdata->vif)) {
#ifdef CONFIG_MAC80211_MESH
u32 changed = 0;
@@ -2630,6 +2645,18 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
if (!roc)
return -ENOMEM;
+ /*
+ * If the duration is zero, then the driver
+ * wouldn't actually do anything. Set it to
+ * 10 for now.
+ *
+ * TODO: cancel the off-channel operation
+ * when we get the SKB's TX status and
+ * the wait time was zero before.
+ */
+ if (!duration)
+ duration = 10;
+
roc->chan = channel;
roc->duration = duration;
roc->req_duration = duration;
@@ -2671,18 +2698,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
/* otherwise actually kick it off here (for error handling) */
- /*
- * If the duration is zero, then the driver
- * wouldn't actually do anything. Set it to
- * 10 for now.
- *
- * TODO: cancel the off-channel operation
- * when we get the SKB's TX status and
- * the wait time was zero before.
- */
- if (!duration)
- duration = 10;
-
ret = drv_remain_on_channel(local, sdata, channel, duration, type);
if (ret) {
kfree(roc);
@@ -2990,69 +3005,88 @@ cfg80211_beacon_dup(struct cfg80211_beacon_data *beacon)
return new_beacon;
}
-void ieee80211_csa_finalize_work(struct work_struct *work)
+void ieee80211_csa_finish(struct ieee80211_vif *vif)
+{
+ struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
+
+ ieee80211_queue_work(&sdata->local->hw,
+ &sdata->csa_finalize_work);
+}
+EXPORT_SYMBOL(ieee80211_csa_finish);
+
+static void ieee80211_csa_finalize(struct ieee80211_sub_if_data *sdata)
{
- struct ieee80211_sub_if_data *sdata =
- container_of(work, struct ieee80211_sub_if_data,
- csa_finalize_work);
struct ieee80211_local *local = sdata->local;
int err, changed = 0;
- sdata_lock(sdata);
- /* AP might have been stopped while waiting for the lock. */
- if (!sdata->vif.csa_active)
- goto unlock;
-
- if (!ieee80211_sdata_running(sdata))
- goto unlock;
+ sdata_assert_lock(sdata);
- sdata->radar_required = sdata->csa_radar_required;
mutex_lock(&local->mtx);
+ sdata->radar_required = sdata->csa_radar_required;
err = ieee80211_vif_change_channel(sdata, &changed);
mutex_unlock(&local->mtx);
if (WARN_ON(err < 0))
- goto unlock;
+ return;
if (!local->use_chanctx) {
local->_oper_chandef = sdata->csa_chandef;
ieee80211_hw_config(local, 0);
}
- ieee80211_bss_info_change_notify(sdata, changed);
-
sdata->vif.csa_active = false;
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
err = ieee80211_assign_beacon(sdata, sdata->u.ap.next_beacon);
- if (err < 0)
- goto unlock;
-
- changed |= err;
kfree(sdata->u.ap.next_beacon);
sdata->u.ap.next_beacon = NULL;
- ieee80211_bss_info_change_notify(sdata, err);
+ if (err < 0)
+ return;
+ changed |= err;
break;
case NL80211_IFTYPE_ADHOC:
- ieee80211_ibss_finish_csa(sdata);
+ err = ieee80211_ibss_finish_csa(sdata);
+ if (err < 0)
+ return;
+ changed |= err;
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
err = ieee80211_mesh_finish_csa(sdata);
if (err < 0)
- goto unlock;
+ return;
+ changed |= err;
break;
#endif
default:
WARN_ON(1);
- goto unlock;
+ return;
}
+ ieee80211_bss_info_change_notify(sdata, changed);
+
ieee80211_wake_queues_by_reason(&sdata->local->hw,
IEEE80211_MAX_QUEUE_MAP,
IEEE80211_QUEUE_STOP_REASON_CSA);
cfg80211_ch_switch_notify(sdata->dev, &sdata->csa_chandef);
+}
+
+void ieee80211_csa_finalize_work(struct work_struct *work)
+{
+ struct ieee80211_sub_if_data *sdata =
+ container_of(work, struct ieee80211_sub_if_data,
+ csa_finalize_work);
+
+ sdata_lock(sdata);
+ /* AP might have been stopped while waiting for the lock. */
+ if (!sdata->vif.csa_active)
+ goto unlock;
+
+ if (!ieee80211_sdata_running(sdata))
+ goto unlock;
+
+ ieee80211_csa_finalize(sdata);
unlock:
sdata_unlock(sdata);
@@ -3066,9 +3100,9 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_chanctx_conf *chanctx_conf;
struct ieee80211_chanctx *chanctx;
struct ieee80211_if_mesh __maybe_unused *ifmsh;
- int err, num_chanctx;
+ int err, num_chanctx, changed = 0;
- lockdep_assert_held(&sdata->wdev.mtx);
+ sdata_assert_lock(sdata);
if (!list_empty(&local->roc_list) || local->scanning)
return -EBUSY;
@@ -3107,19 +3141,40 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
switch (sdata->vif.type) {
case NL80211_IFTYPE_AP:
- sdata->csa_counter_offset_beacon =
- params->counter_offset_beacon;
- sdata->csa_counter_offset_presp = params->counter_offset_presp;
sdata->u.ap.next_beacon =
cfg80211_beacon_dup(&params->beacon_after);
if (!sdata->u.ap.next_beacon)
return -ENOMEM;
+ /*
+ * With a count of 0, we don't have to wait for any
+ * TBTT before switching, so complete the CSA
+ * immediately. In theory, with a count == 1 we
+ * should delay the switch until just before the next
+ * TBTT, but that would complicate things so we switch
+ * immediately too. If we would delay the switch
+ * until the next TBTT, we would have to set the probe
+ * response here.
+ *
+ * TODO: A channel switch with count <= 1 without
+ * sending a CSA action frame is kind of useless,
+ * because the clients won't know we're changing
+ * channels. The action frame must be implemented
+ * either here or in the userspace.
+ */
+ if (params->count <= 1)
+ break;
+
+ sdata->csa_counter_offset_beacon =
+ params->counter_offset_beacon;
+ sdata->csa_counter_offset_presp = params->counter_offset_presp;
err = ieee80211_assign_beacon(sdata, &params->beacon_csa);
if (err < 0) {
kfree(sdata->u.ap.next_beacon);
return err;
}
+ changed |= err;
+
break;
case NL80211_IFTYPE_ADHOC:
if (!sdata->vif.bss_conf.ibss_joined)
@@ -3147,17 +3202,21 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
params->chandef.chan->band)
return -EINVAL;
- err = ieee80211_ibss_csa_beacon(sdata, params);
- if (err < 0)
- return err;
+ /* see comments in the NL80211_IFTYPE_AP block */
+ if (params->count > 1) {
+ err = ieee80211_ibss_csa_beacon(sdata, params);
+ if (err < 0)
+ return err;
+ changed |= err;
+ }
+
+ ieee80211_send_action_csa(sdata, params);
+
break;
#ifdef CONFIG_MAC80211_MESH
case NL80211_IFTYPE_MESH_POINT:
ifmsh = &sdata->u.mesh;
- if (!ifmsh->mesh_id)
- return -EINVAL;
-
if (params->chandef.width != sdata->vif.bss_conf.chandef.width)
return -EINVAL;
@@ -3166,17 +3225,27 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
params->chandef.chan->band)
return -EINVAL;
- ifmsh->chsw_init = true;
- if (!ifmsh->pre_value)
- ifmsh->pre_value = 1;
- else
- ifmsh->pre_value++;
+ if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_NONE) {
+ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_INIT;
+ if (!ifmsh->pre_value)
+ ifmsh->pre_value = 1;
+ else
+ ifmsh->pre_value++;
+ }
- err = ieee80211_mesh_csa_beacon(sdata, params, true);
- if (err < 0) {
- ifmsh->chsw_init = false;
- return err;
+ /* see comments in the NL80211_IFTYPE_AP block */
+ if (params->count > 1) {
+ err = ieee80211_mesh_csa_beacon(sdata, params);
+ if (err < 0) {
+ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
+ return err;
+ }
+ changed |= err;
}
+
+ if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT)
+ ieee80211_send_action_csa(sdata, params);
+
break;
#endif
default:
@@ -3193,8 +3262,13 @@ int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
sdata->csa_chandef = params->chandef;
sdata->vif.csa_active = true;
- ieee80211_bss_info_change_notify(sdata, err);
- drv_channel_switch_beacon(sdata, &params->chandef);
+ if (changed) {
+ ieee80211_bss_info_change_notify(sdata, changed);
+ drv_channel_switch_beacon(sdata, &params->chandef);
+ } else {
+ /* if the beacon didn't change, we can finalize immediately */
+ ieee80211_csa_finalize(sdata);
+ }
return 0;
}
@@ -3865,7 +3939,7 @@ static int ieee80211_set_qos_map(struct wiphy *wiphy,
return 0;
}
-struct cfg80211_ops mac80211_config_ops = {
+const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
.change_virtual_intf = ieee80211_change_iface,
diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
index 7d7879f..2d51f62 100644
--- a/net/mac80211/cfg.h
+++ b/net/mac80211/cfg.h
@@ -4,6 +4,6 @@
#ifndef __CFG_H
#define __CFG_H
-extern struct cfg80211_ops mac80211_config_ops;
+extern const struct cfg80211_ops mac80211_config_ops;
#endif /* __CFG_H */
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index f43613a..42c6592 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -196,6 +196,8 @@ static bool ieee80211_is_radar_required(struct ieee80211_local *local)
{
struct ieee80211_sub_if_data *sdata;
+ lockdep_assert_held(&local->mtx);
+
rcu_read_lock();
list_for_each_entry_rcu(sdata, &local->interfaces, list) {
if (sdata->radar_required) {
diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
index 80194b5..2ecb4de 100644
--- a/net/mac80211/debugfs_sta.c
+++ b/net/mac80211/debugfs_sta.c
@@ -195,7 +195,7 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
static ssize_t sta_agg_status_write(struct file *file, const char __user *userbuf,
size_t count, loff_t *ppos)
{
- char _buf[12], *buf = _buf;
+ char _buf[12] = {}, *buf = _buf;
struct sta_info *sta = file->private_data;
bool start, tx;
unsigned long tid;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index 70dd013..afbe2b2 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -375,7 +375,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata,
mgmt->u.action.u.delba.params = cpu_to_le16(params);
mgmt->u.action.u.delba.reason_code = cpu_to_le16(reason_code);
- ieee80211_tx_skb_tid(sdata, skb, tid);
+ ieee80211_tx_skb(sdata, skb);
}
void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 2796a19..4453e27 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -220,7 +220,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
struct ieee80211_mgmt *mgmt;
struct cfg80211_bss *bss;
u32 bss_change;
@@ -294,7 +293,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
}
mutex_lock(&local->mtx);
- ieee80211_vif_release_channel(sdata);
if (ieee80211_vif_use_channel(sdata, &chandef,
ifibss->fixed_channel ?
IEEE80211_CHANCTX_SHARED :
@@ -303,12 +301,11 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&local->mtx);
return;
}
+ sdata->radar_required = radar_required;
mutex_unlock(&local->mtx);
memcpy(ifibss->bssid, bssid, ETH_ALEN);
- sband = local->hw.wiphy->bands[chan->band];
-
presp = ieee80211_ibss_build_presp(sdata, beacon_int, basic_rates,
capability, tsf, &chandef,
&have_higher_than_11mbit, NULL);
@@ -318,7 +315,6 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
rcu_assign_pointer(ifibss->presp, presp);
mgmt = (void *)presp->head;
- sdata->radar_required = radar_required;
sdata->vif.bss_conf.enable_beacon = true;
sdata->vif.bss_conf.beacon_int = beacon_int;
sdata->vif.bss_conf.basic_rates = basic_rates;
@@ -386,7 +382,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
presp->head_len, 0, GFP_KERNEL);
cfg80211_put_bss(local->hw.wiphy, bss);
netif_carrier_on(sdata->dev);
- cfg80211_ibss_joined(sdata->dev, ifibss->bssid, GFP_KERNEL);
+ cfg80211_ibss_joined(sdata->dev, ifibss->bssid, chan, GFP_KERNEL);
}
static void ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
@@ -521,12 +517,6 @@ int ieee80211_ibss_csa_beacon(struct ieee80211_sub_if_data *sdata,
if (old_presp)
kfree_rcu(old_presp, rcu_head);
- /* it might not send the beacon for a while. send an action frame
- * immediately to announce the channel switch.
- */
- if (csa_settings)
- ieee80211_send_action_csa(sdata, csa_settings);
-
return BSS_CHANGED_BEACON;
out:
return ret;
@@ -536,7 +526,7 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_if_ibss *ifibss = &sdata->u.ibss;
struct cfg80211_bss *cbss;
- int err;
+ int err, changed = 0;
u16 capability;
sdata_assert_lock(sdata);
@@ -568,10 +558,9 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata)
if (err < 0)
return err;
- if (err)
- ieee80211_bss_info_change_notify(sdata, err);
+ changed |= err;
- return 0;
+ return changed;
}
void ieee80211_ibss_stop(struct ieee80211_sub_if_data *sdata)
@@ -799,6 +788,8 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
int err;
u32 sta_flags;
+ sdata_assert_lock(sdata);
+
sta_flags = IEEE80211_STA_DISABLE_VHT;
switch (ifibss->chandef.width) {
case NL80211_CHAN_WIDTH_5:
@@ -1468,6 +1459,11 @@ static void ieee80211_rx_mgmt_probe_req(struct ieee80211_sub_if_data *sdata,
memcpy(((struct ieee80211_mgmt *) skb->data)->da, mgmt->sa, ETH_ALEN);
ibss_dbg(sdata, "Sending ProbeResp to %pM\n", mgmt->sa);
IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_DONT_ENCRYPT;
+
+ /* avoid excessive retries for probe request to wildcard SSIDs */
+ if (pos[1] == 0)
+ IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_CTL_NO_ACK;
+
ieee80211_tx_skb(sdata, skb);
}
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 5e44e317..906e211 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -616,7 +616,11 @@ struct ieee80211_if_mesh {
struct ps_data ps;
/* Channel Switching Support */
struct mesh_csa_settings __rcu *csa;
- bool chsw_init;
+ enum {
+ IEEE80211_MESH_CSA_ROLE_NONE,
+ IEEE80211_MESH_CSA_ROLE_INIT,
+ IEEE80211_MESH_CSA_ROLE_REPEATER,
+ } csa_role;
u8 chsw_ttl;
u16 pre_value;
@@ -1408,8 +1412,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata);
void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
struct sk_buff *skb);
int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_csa_settings *csa_settings,
- bool csa_action);
+ struct cfg80211_csa_settings *csa_settings);
int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata);
/* scan/BSS handling */
@@ -1553,6 +1556,9 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta);
enum ieee80211_sta_rx_bandwidth ieee80211_sta_cur_vht_bw(struct sta_info *sta);
void ieee80211_sta_set_rx_nss(struct sta_info *sta);
+u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, u8 opmode,
+ enum ieee80211_band band, bool nss_only);
void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, u8 opmode,
enum ieee80211_band band, bool nss_only);
@@ -1605,7 +1611,7 @@ static inline int __ieee80211_resume(struct ieee80211_hw *hw)
}
/* utility functions/constants */
-extern void *mac80211_wiphy_privid; /* for wiphy privid */
+extern const void *const mac80211_wiphy_privid; /* for wiphy privid */
u8 *ieee80211_get_bssid(struct ieee80211_hdr *hdr, size_t len,
enum nl80211_iftype type);
int ieee80211_frame_duration(enum ieee80211_band band, size_t len,
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ce1c443..088111a 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -833,7 +833,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
cancel_work_sync(&local->dynamic_ps_enable_work);
cancel_work_sync(&sdata->recalc_smps);
+ sdata_lock(sdata);
sdata->vif.csa_active = false;
+ sdata_unlock(sdata);
cancel_work_sync(&sdata->csa_finalize_work);
cancel_delayed_work_sync(&sdata->dfs_cac_timer_work);
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index d767cfb..1f7d842 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -893,10 +893,15 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
/* mac80211 supports control port protocol changing */
local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;
- if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
+ if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM) {
local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
- else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
+ } else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC) {
local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;
+ if (hw->max_signal <= 0) {
+ result = -EINVAL;
+ goto fail_wiphy_register;
+ }
+ }
WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
&& (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 5b919ca..f70e9cd 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -688,7 +688,7 @@ ieee80211_mesh_build_beacon(struct ieee80211_if_mesh *ifmsh)
*pos++ = csa->settings.count;
*pos++ = WLAN_EID_CHAN_SWITCH_PARAM;
*pos++ = 6;
- if (ifmsh->chsw_init) {
+ if (ifmsh->csa_role == IEEE80211_MESH_CSA_ROLE_INIT) {
*pos++ = ifmsh->mshcfg.dot11MeshTTL;
*pos |= WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
} else {
@@ -859,18 +859,12 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
{
struct cfg80211_csa_settings params;
struct ieee80211_csa_ie csa_ie;
- struct ieee80211_chanctx_conf *chanctx_conf;
- struct ieee80211_chanctx *chanctx;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
enum ieee80211_band band = ieee80211_get_sdata_band(sdata);
- int err, num_chanctx;
+ int err;
u32 sta_flags;
- if (sdata->vif.csa_active)
- return true;
-
- if (!ifmsh->mesh_id)
- return false;
+ sdata_assert_lock(sdata);
sta_flags = IEEE80211_STA_DISABLE_VHT;
switch (sdata->vif.bss_conf.chandef.width) {
@@ -896,10 +890,6 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
params.chandef = csa_ie.chandef;
params.count = csa_ie.count;
- if (sdata->vif.bss_conf.chandef.chan->band !=
- params.chandef.chan->band)
- return false;
-
if (!cfg80211_chandef_usable(sdata->local->hw.wiphy, &params.chandef,
IEEE80211_CHAN_DISABLED)) {
sdata_info(sdata,
@@ -922,24 +912,12 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
return false;
}
- rcu_read_lock();
- chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
- if (!chanctx_conf)
- goto failed_chswitch;
-
- /* don't handle for multi-VIF cases */
- chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf);
- if (chanctx->refcount > 1)
- goto failed_chswitch;
-
- num_chanctx = 0;
- list_for_each_entry_rcu(chanctx, &sdata->local->chanctx_list, list)
- num_chanctx++;
-
- if (num_chanctx > 1)
- goto failed_chswitch;
-
- rcu_read_unlock();
+ if (cfg80211_chandef_identical(&params.chandef,
+ &sdata->vif.bss_conf.chandef)) {
+ mcsa_dbg(sdata,
+ "received csa with an identical chandef, ignoring\n");
+ return true;
+ }
mcsa_dbg(sdata,
"received channel switch announcement to go to channel %d MHz\n",
@@ -953,30 +931,16 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
ifmsh->pre_value = csa_ie.pre_value;
}
- if (ifmsh->chsw_ttl < ifmsh->mshcfg.dot11MeshTTL) {
- if (ieee80211_mesh_csa_beacon(sdata, &params, false) < 0)
- return false;
- } else {
+ if (ifmsh->chsw_ttl >= ifmsh->mshcfg.dot11MeshTTL)
return false;
- }
- sdata->csa_radar_required = params.radar_required;
+ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_REPEATER;
- if (params.block_tx)
- ieee80211_stop_queues_by_reason(&sdata->local->hw,
- IEEE80211_MAX_QUEUE_MAP,
- IEEE80211_QUEUE_STOP_REASON_CSA);
-
- sdata->csa_chandef = params.chandef;
- sdata->vif.csa_active = true;
-
- ieee80211_bss_info_change_notify(sdata, err);
- drv_channel_switch_beacon(sdata, &params.chandef);
+ if (ieee80211_channel_switch(sdata->local->hw.wiphy, sdata->dev,
+ &params) < 0)
+ return false;
return true;
-failed_chswitch:
- rcu_read_unlock();
- return false;
}
static void
@@ -1086,7 +1050,8 @@ static void ieee80211_mesh_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
ifmsh->sync_ops->rx_bcn_presp(sdata,
stype, mgmt, &elems, rx_status);
- if (!ifmsh->chsw_init)
+ if (ifmsh->csa_role != IEEE80211_MESH_CSA_ROLE_INIT &&
+ !sdata->vif.csa_active)
ieee80211_mesh_process_chnswitch(sdata, &elems, true);
}
@@ -1095,29 +1060,30 @@ int ieee80211_mesh_finish_csa(struct ieee80211_sub_if_data *sdata)
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_csa_settings *tmp_csa_settings;
int ret = 0;
+ int changed = 0;
/* Reset the TTL value and Initiator flag */
- ifmsh->chsw_init = false;
+ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
ifmsh->chsw_ttl = 0;
/* Remove the CSA and MCSP elements from the beacon */
tmp_csa_settings = rcu_dereference(ifmsh->csa);
rcu_assign_pointer(ifmsh->csa, NULL);
- kfree_rcu(tmp_csa_settings, rcu_head);
+ if (tmp_csa_settings)
+ kfree_rcu(tmp_csa_settings, rcu_head);
ret = ieee80211_mesh_rebuild_beacon(sdata);
if (ret)
return -EINVAL;
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
+ changed |= BSS_CHANGED_BEACON;
mcsa_dbg(sdata, "complete switching to center freq %d MHz",
sdata->vif.bss_conf.chandef.chan->center_freq);
- return 0;
+ return changed;
}
int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
- struct cfg80211_csa_settings *csa_settings,
- bool csa_action)
+ struct cfg80211_csa_settings *csa_settings)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
struct mesh_csa_settings *tmp_csa_settings;
@@ -1141,12 +1107,7 @@ int ieee80211_mesh_csa_beacon(struct ieee80211_sub_if_data *sdata,
return ret;
}
- ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON);
-
- if (csa_action)
- ieee80211_send_action_csa(sdata, csa_settings);
-
- return 0;
+ return BSS_CHANGED_BEACON;
}
static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
@@ -1210,7 +1171,8 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
ifmsh->pre_value = pre_value;
- if (!ieee80211_mesh_process_chnswitch(sdata, &elems, false)) {
+ if (!sdata->vif.csa_active &&
+ !ieee80211_mesh_process_chnswitch(sdata, &elems, false)) {
mcsa_dbg(sdata, "Failed to process CSA action frame");
return;
}
@@ -1257,7 +1219,7 @@ void ieee80211_mesh_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata,
sdata_lock(sdata);
/* mesh already went down */
- if (!sdata->wdev.mesh_id_len)
+ if (!sdata->u.mesh.mesh_id_len)
goto out;
rx_status = IEEE80211_SKB_RXCB(skb);
@@ -1310,7 +1272,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
sdata_lock(sdata);
/* mesh already went down */
- if (!sdata->wdev.mesh_id_len)
+ if (!sdata->u.mesh.mesh_id_len)
goto out;
if (ifmsh->preq_queue_len &&
@@ -1365,7 +1327,7 @@ void ieee80211_mesh_init_sdata(struct ieee80211_sub_if_data *sdata)
mesh_rmc_init(sdata);
ifmsh->last_preq = jiffies;
ifmsh->next_perr = jiffies;
- ifmsh->chsw_init = false;
+ ifmsh->csa_role = IEEE80211_MESH_CSA_ROLE_NONE;
/* Allocate all mesh structures when creating the first mesh interface. */
if (!mesh_allocated)
ieee80211s_init();
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 245dce9..4bc2275 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -531,6 +531,7 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
u8 *pos;
u32 cap;
struct ieee80211_sta_vht_cap vht_cap;
+ u32 mask, ap_bf_sts, our_bf_sts;
BUILD_BUG_ON(sizeof(vht_cap) != sizeof(sband->vht_cap));
@@ -558,6 +559,16 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,
cpu_to_le32(IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)))
cap &= ~IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE;
+ mask = IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
+
+ ap_bf_sts = le32_to_cpu(ap_vht_cap->vht_cap_info) & mask;
+ our_bf_sts = cap & mask;
+
+ if (ap_bf_sts < our_bf_sts) {
+ cap &= ~mask;
+ cap |= ap_bf_sts;
+ }
+
/* reserve and fill IE */
pos = skb_put(skb, sizeof(struct ieee80211_vht_cap) + 2);
ieee80211_ie_build_vht_cap(pos, &vht_cap, cap);
@@ -768,6 +779,34 @@ static void ieee80211_send_assoc(struct ieee80211_sub_if_data *sdata)
ieee80211_add_ht_ie(sdata, skb, assoc_data->ap_ht_param,
sband, chan, sdata->smps_mode);
+ /* if present, add any custom IEs that go before VHT */
+ if (assoc_data->ie_len) {
+ static const u8 before_vht[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_EXT_SUPP_RATES,
+ WLAN_EID_PWR_CAPABILITY,
+ WLAN_EID_SUPPORTED_CHANNELS,
+ WLAN_EID_RSN,
+ WLAN_EID_QOS_CAPA,
+ WLAN_EID_RRM_ENABLED_CAPABILITIES,
+ WLAN_EID_MOBILITY_DOMAIN,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ WLAN_EID_HT_CAPABILITY,
+ WLAN_EID_BSS_COEX_2040,
+ WLAN_EID_EXT_CAPABILITY,
+ WLAN_EID_QOS_TRAFFIC_CAPA,
+ WLAN_EID_TIM_BCAST_REQ,
+ WLAN_EID_INTERWORKING,
+ };
+ noffset = ieee80211_ie_split(assoc_data->ie, assoc_data->ie_len,
+ before_vht, ARRAY_SIZE(before_vht),
+ offset);
+ pos = skb_put(skb, noffset - offset);
+ memcpy(pos, assoc_data->ie + offset, noffset - offset);
+ offset = noffset;
+ }
+
if (!(ifmgd->flags & IEEE80211_STA_DISABLE_VHT))
ieee80211_add_vht_ie(sdata, skb, sband,
&assoc_data->ap_vht_cap);
@@ -1024,7 +1063,6 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
}
ifmgd->flags |= IEEE80211_STA_CSA_RECEIVED;
- sdata->vif.csa_active = true;
mutex_lock(&local->chanctx_mtx);
if (local->use_chanctx) {
@@ -1062,6 +1100,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
mutex_unlock(&local->chanctx_mtx);
sdata->csa_chandef = csa_ie.chandef;
+ sdata->vif.csa_active = true;
if (csa_ie.mode)
ieee80211_stop_queues_by_reason(&local->hw,
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 22b223f..8fdadfd 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -10,15 +10,15 @@
#include <linux/kernel.h>
#include <linux/rtnetlink.h>
-#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include "rate.h"
#include "ieee80211_i.h"
#include "debugfs.h"
struct rate_control_alg {
struct list_head list;
- struct rate_control_ops *ops;
+ const struct rate_control_ops *ops;
};
static LIST_HEAD(rate_ctrl_algs);
@@ -29,7 +29,7 @@ module_param(ieee80211_default_rc_algo, charp, 0644);
MODULE_PARM_DESC(ieee80211_default_rc_algo,
"Default rate control algorithm for mac80211 to use");
-int ieee80211_rate_control_register(struct rate_control_ops *ops)
+int ieee80211_rate_control_register(const struct rate_control_ops *ops)
{
struct rate_control_alg *alg;
@@ -60,7 +60,7 @@ int ieee80211_rate_control_register(struct rate_control_ops *ops)
}
EXPORT_SYMBOL(ieee80211_rate_control_register);
-void ieee80211_rate_control_unregister(struct rate_control_ops *ops)
+void ieee80211_rate_control_unregister(const struct rate_control_ops *ops)
{
struct rate_control_alg *alg;
@@ -76,32 +76,31 @@ void ieee80211_rate_control_unregister(struct rate_control_ops *ops)
}
EXPORT_SYMBOL(ieee80211_rate_control_unregister);
-static struct rate_control_ops *
+static const struct rate_control_ops *
ieee80211_try_rate_control_ops_get(const char *name)
{
struct rate_control_alg *alg;
- struct rate_control_ops *ops = NULL;
+ const struct rate_control_ops *ops = NULL;
if (!name)
return NULL;
mutex_lock(&rate_ctrl_mutex);
list_for_each_entry(alg, &rate_ctrl_algs, list) {
- if (!strcmp(alg->ops->name, name))
- if (try_module_get(alg->ops->module)) {
- ops = alg->ops;
- break;
- }
+ if (!strcmp(alg->ops->name, name)) {
+ ops = alg->ops;
+ break;
+ }
}
mutex_unlock(&rate_ctrl_mutex);
return ops;
}
/* Get the rate control algorithm. */
-static struct rate_control_ops *
+static const struct rate_control_ops *
ieee80211_rate_control_ops_get(const char *name)
{
- struct rate_control_ops *ops;
+ const struct rate_control_ops *ops;
const char *alg_name;
kparam_block_sysfs_write(ieee80211_default_rc_algo);
@@ -111,10 +110,6 @@ ieee80211_rate_control_ops_get(const char *name)
alg_name = name;
ops = ieee80211_try_rate_control_ops_get(alg_name);
- if (!ops) {
- request_module("rc80211_%s", alg_name);
- ops = ieee80211_try_rate_control_ops_get(alg_name);
- }
if (!ops && name)
/* try default if specific alg requested but not found */
ops = ieee80211_try_rate_control_ops_get(ieee80211_default_rc_algo);
@@ -127,11 +122,6 @@ ieee80211_rate_control_ops_get(const char *name)
return ops;
}
-static void ieee80211_rate_control_ops_put(struct rate_control_ops *ops)
-{
- module_put(ops->module);
-}
-
#ifdef CONFIG_MAC80211_DEBUGFS
static ssize_t rcname_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
@@ -158,11 +148,11 @@ static struct rate_control_ref *rate_control_alloc(const char *name,
ref = kmalloc(sizeof(struct rate_control_ref), GFP_KERNEL);
if (!ref)
- goto fail_ref;
+ return NULL;
ref->local = local;
ref->ops = ieee80211_rate_control_ops_get(name);
if (!ref->ops)
- goto fail_ops;
+ goto free;
#ifdef CONFIG_MAC80211_DEBUGFS
debugfsdir = debugfs_create_dir("rc", local->hw.wiphy->debugfsdir);
@@ -172,14 +162,11 @@ static struct rate_control_ref *rate_control_alloc(const char *name,
ref->priv = ref->ops->alloc(&local->hw, debugfsdir);
if (!ref->priv)
- goto fail_priv;
+ goto free;
return ref;
-fail_priv:
- ieee80211_rate_control_ops_put(ref->ops);
-fail_ops:
+free:
kfree(ref);
-fail_ref:
return NULL;
}
@@ -192,7 +179,6 @@ static void rate_control_free(struct rate_control_ref *ctrl_ref)
ctrl_ref->local->debugfs.rcdir = NULL;
#endif
- ieee80211_rate_control_ops_put(ctrl_ref->ops);
kfree(ctrl_ref);
}
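With the rate control ops now const and module reference counting gone from rate.c, an algorithm registers a read-only ops table. A minimal registration sketch, assuming the usual mac80211 headers; the example_rc_* handlers are placeholders, not symbols from this patch:
static const struct rate_control_ops example_rc_ops = {
	.name		= "example_rc",
	.tx_status	= example_rc_tx_status,
	.get_rate	= example_rc_get_rate,
	.rate_init	= example_rc_rate_init,
	.alloc		= example_rc_alloc,
	.free		= example_rc_free,
	.alloc_sta	= example_rc_alloc_sta,
	.free_sta	= example_rc_free_sta,
};

static int __init example_rc_init(void)
{
	/* ieee80211_rate_control_register() now takes a const pointer */
	return ieee80211_rate_control_register(&example_rc_ops);
}

static void __exit example_rc_exit(void)
{
	ieee80211_rate_control_unregister(&example_rc_ops);
}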
diff --git a/net/mac80211/rate.h b/net/mac80211/rate.h
index b95e16c..9aa2a11 100644
--- a/net/mac80211/rate.h
+++ b/net/mac80211/rate.h
@@ -21,7 +21,7 @@
struct rate_control_ref {
struct ieee80211_local *local;
- struct rate_control_ops *ops;
+ const struct rate_control_ops *ops;
void *priv;
};
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index f3d88b0..26fd94f 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -657,7 +657,7 @@ minstrel_free(void *priv)
kfree(priv);
}
-struct rate_control_ops mac80211_minstrel = {
+const struct rate_control_ops mac80211_minstrel = {
.name = "minstrel",
.tx_status = minstrel_tx_status,
.get_rate = minstrel_get_rate,
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index f4301f4..046d1bd 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -123,7 +123,7 @@ struct minstrel_debugfs_info {
char buf[];
};
-extern struct rate_control_ops mac80211_minstrel;
+extern const struct rate_control_ops mac80211_minstrel;
void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
void minstrel_remove_sta_debugfs(void *priv, void *priv_sta);
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index c1b5b73..bccaf85 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -124,7 +124,7 @@ const struct mcs_group minstrel_mcs_groups[] = {
#define MINSTREL_CCK_GROUP (ARRAY_SIZE(minstrel_mcs_groups) - 1)
-static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES];
+static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
static void
minstrel_ht_update_rates(struct minstrel_priv *mp, struct minstrel_ht_sta *mi);
@@ -1031,7 +1031,7 @@ minstrel_ht_free(void *priv)
mac80211_minstrel.free(priv);
}
-static struct rate_control_ops mac80211_minstrel_ht = {
+static const struct rate_control_ops mac80211_minstrel_ht = {
.name = "minstrel_ht",
.tx_status = minstrel_ht_tx_status,
.get_rate = minstrel_ht_get_rate,
@@ -1048,8 +1048,7 @@ static struct rate_control_ops mac80211_minstrel_ht = {
};
-static void
-init_sample_table(void)
+static void __init init_sample_table(void)
{
int col, i, new_idx;
u8 rnd[MCS_GROUP_RATES];
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index 958fad0..d0da2a7 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -452,7 +452,7 @@ static void rate_control_pid_free_sta(void *priv, struct ieee80211_sta *sta,
kfree(priv_sta);
}
-static struct rate_control_ops mac80211_rcpid = {
+static const struct rate_control_ops mac80211_rcpid = {
.name = "pid",
.tx_status = rate_control_pid_tx_status,
.get_rate = rate_control_pid_get_rate,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3e57f96..2824402 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -40,8 +40,6 @@
static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
struct sk_buff *skb)
{
- struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
-
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
if (likely(skb->len > FCS_LEN))
__pskb_trim(skb, skb->len - FCS_LEN);
@@ -53,9 +51,6 @@ static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
}
}
- if (status->vendor_radiotap_len)
- __pskb_pull(skb, status->vendor_radiotap_len);
-
return skb;
}
@@ -64,14 +59,13 @@ static inline int should_drop_frame(struct sk_buff *skb, int present_fcs_len)
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_hdr *hdr;
- hdr = (void *)(skb->data + status->vendor_radiotap_len);
+ hdr = (void *)(skb->data);
if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
RX_FLAG_FAILED_PLCP_CRC |
RX_FLAG_AMPDU_IS_ZEROLEN))
return 1;
- if (unlikely(skb->len < 16 + present_fcs_len +
- status->vendor_radiotap_len))
+ if (unlikely(skb->len < 16 + present_fcs_len))
return 1;
if (ieee80211_is_ctl(hdr->frame_control) &&
!ieee80211_is_pspoll(hdr->frame_control) &&
@@ -90,8 +84,6 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
len = sizeof(struct ieee80211_radiotap_header) + 8;
/* allocate extra bitmaps */
- if (status->vendor_radiotap_len)
- len += 4;
if (status->chains)
len += 4 * hweight8(status->chains);
@@ -127,18 +119,6 @@ ieee80211_rx_radiotap_space(struct ieee80211_local *local,
len += 2 * hweight8(status->chains);
}
- if (status->vendor_radiotap_len) {
- if (WARN_ON_ONCE(status->vendor_radiotap_align == 0))
- status->vendor_radiotap_align = 1;
- /* align standard part of vendor namespace */
- len = ALIGN(len, 2);
- /* allocate standard part of vendor namespace */
- len += 6;
- /* align vendor-defined part */
- len = ALIGN(len, status->vendor_radiotap_align);
- /* vendor-defined part is already in skb */
- }
-
return len;
}
@@ -172,7 +152,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
it_present = &rthdr->it_present;
/* radiotap header, set always present flags */
- rthdr->it_len = cpu_to_le16(rtap_len + status->vendor_radiotap_len);
+ rthdr->it_len = cpu_to_le16(rtap_len);
it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
BIT(IEEE80211_RADIOTAP_CHANNEL) |
BIT(IEEE80211_RADIOTAP_RX_FLAGS);
@@ -190,14 +170,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
}
- if (status->vendor_radiotap_len) {
- it_present_val |= BIT(IEEE80211_RADIOTAP_VENDOR_NAMESPACE) |
- BIT(IEEE80211_RADIOTAP_EXT);
- put_unaligned_le32(it_present_val, it_present);
- it_present++;
- it_present_val = status->vendor_radiotap_bitmap;
- }
-
put_unaligned_le32(it_present_val, it_present);
pos = (void *)(it_present + 1);
@@ -307,6 +279,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
*pos |= IEEE80211_RADIOTAP_MCS_BW_40;
if (status->flag & RX_FLAG_HT_GF)
*pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
+ if (status->flag & RX_FLAG_LDPC)
+ *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT;
*pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
pos++;
@@ -349,20 +323,23 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
/* known field - how to handle 80+80? */
- if (status->flag & RX_FLAG_80P80MHZ)
+ if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
known &= ~IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH;
put_unaligned_le16(known, pos);
pos += 2;
/* flags */
if (status->flag & RX_FLAG_SHORT_GI)
*pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
+ /* in VHT, STBC is binary */
+ if (status->flag & RX_FLAG_STBC_MASK)
+ *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
pos++;
/* bandwidth */
- if (status->flag & RX_FLAG_80MHZ)
+ if (status->vht_flag & RX_VHT_FLAG_80MHZ)
*pos++ = 4;
- else if (status->flag & RX_FLAG_80P80MHZ)
+ else if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
*pos++ = 0; /* marked not known above */
- else if (status->flag & RX_FLAG_160MHZ)
+ else if (status->vht_flag & RX_VHT_FLAG_160MHZ)
*pos++ = 11;
else if (status->flag & RX_FLAG_40MHZ)
*pos++ = 1;
@@ -372,6 +349,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
*pos = (status->rate_idx << 4) | status->vht_nss;
pos += 4;
/* coding field */
+ if (status->flag & RX_FLAG_LDPC)
+ *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
pos++;
/* group ID */
pos++;
@@ -383,21 +362,6 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
*pos++ = status->chain_signal[chain];
*pos++ = chain;
}
-
- if (status->vendor_radiotap_len) {
- /* ensure 2 byte alignment for the vendor field as required */
- if ((pos - (u8 *)rthdr) & 1)
- *pos++ = 0;
- *pos++ = status->vendor_radiotap_oui[0];
- *pos++ = status->vendor_radiotap_oui[1];
- *pos++ = status->vendor_radiotap_oui[2];
- *pos++ = status->vendor_radiotap_subns;
- put_unaligned_le16(status->vendor_radiotap_len, pos);
- pos += 2;
- /* align the actual payload as requested */
- while ((pos - (u8 *)rthdr) & (status->vendor_radiotap_align - 1))
- *pos++ = 0;
- }
}
/*
@@ -428,8 +392,8 @@ ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
present_fcs_len = FCS_LEN;
- /* ensure hdr->frame_control and vendor radiotap data are in skb head */
- if (!pskb_may_pull(origskb, 2 + status->vendor_radiotap_len)) {
+ /* ensure hdr->frame_control is in skb head */
+ if (!pskb_may_pull(origskb, 2)) {
dev_kfree_skb(origskb);
return NULL;
}
@@ -599,10 +563,10 @@ static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- if (skb->len < 24 || is_multicast_ether_addr(hdr->addr1))
+ if (is_multicast_ether_addr(hdr->addr1))
return 0;
- return ieee80211_is_robust_mgmt_frame(hdr);
+ return ieee80211_is_robust_mgmt_frame(skb);
}
@@ -610,10 +574,10 @@ static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
- if (skb->len < 24 || !is_multicast_ether_addr(hdr->addr1))
+ if (!is_multicast_ether_addr(hdr->addr1))
return 0;
- return ieee80211_is_robust_mgmt_frame(hdr);
+ return ieee80211_is_robust_mgmt_frame(skb);
}
@@ -626,7 +590,7 @@ static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
return -1;
- if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *) hdr))
+ if (!ieee80211_is_robust_mgmt_frame(skb))
return -1; /* not a robust management frame */
mmie = (struct ieee80211_mmie *)
@@ -1268,6 +1232,7 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
if (ieee80211_is_data(hdr->frame_control)) {
sta->last_rx_rate_idx = status->rate_idx;
sta->last_rx_rate_flag = status->flag;
+ sta->last_rx_rate_vht_flag = status->vht_flag;
sta->last_rx_rate_vht_nss = status->vht_nss;
}
}
@@ -1318,18 +1283,15 @@ ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
!ieee80211_has_morefrags(hdr->frame_control) &&
!(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
(rx->sdata->vif.type == NL80211_IFTYPE_AP ||
- rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)) {
+ rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
+ /* PM bit is only checked in frames where it isn't reserved,
+ * in AP mode it's reserved in non-bufferable management frames
+ * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
+ */
+ (!ieee80211_is_mgmt(hdr->frame_control) ||
+ ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
- /*
- * Ignore doze->wake transitions that are
- * indicated by non-data frames, the standard
- * is unclear here, but for example going to
- * PS mode and then scanning would cause a
- * doze->wake transition for the probe request,
- * and that is clearly undesirable.
- */
- if (ieee80211_is_data(hdr->frame_control) &&
- !ieee80211_has_pm(hdr->frame_control))
+ if (!ieee80211_has_pm(hdr->frame_control))
sta_ps_end(sta);
} else {
if (ieee80211_has_pm(hdr->frame_control))
@@ -1852,8 +1814,7 @@ static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
* having configured keys.
*/
if (unlikely(ieee80211_is_action(fc) && !rx->key &&
- ieee80211_is_robust_mgmt_frame(
- (struct ieee80211_hdr *) rx->skb->data)))
+ ieee80211_is_robust_mgmt_frame(rx->skb)))
return -EACCES;
}
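For driver-side context, the radiotap changes above consume the new RX_FLAG_LDPC bit and the 80/160/80+80 MHz flags that moved into status->vht_flag. A minimal sketch of how an rx status might be filled for an LDPC-coded 80 MHz VHT frame; the values are illustrative only:
/* Sketch: reporting an 80 MHz, 2-stream, LDPC-coded VHT frame */
static void example_fill_rx_status(struct ieee80211_rx_status *status)
{
	memset(status, 0, sizeof(*status));
	status->flag |= RX_FLAG_VHT | RX_FLAG_LDPC | RX_FLAG_SHORT_GI;
	status->vht_flag |= RX_VHT_FLAG_80MHZ;	/* bandwidth bits now live here */
	status->vht_nss = 2;
	status->rate_idx = 7;			/* VHT MCS 7 */
}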
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index d3a6d82..4acc5fc 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -261,6 +261,7 @@ struct ieee80211_tx_latency_stat {
* "the" transmit rate
* @last_rx_rate_idx: rx status rate index of the last data packet
* @last_rx_rate_flag: rx status flag of the last data packet
+ * @last_rx_rate_vht_flag: rx status vht flag of the last data packet
* @last_rx_rate_vht_nss: rx status nss of last data packet
* @lock: used for locking all fields that require locking, see comments
* in the header file.
@@ -396,6 +397,7 @@ struct sta_info {
struct ieee80211_tx_rate last_tx_rate;
int last_rx_rate_idx;
u32 last_rx_rate_flag;
+ u32 last_rx_rate_vht_flag;
u8 last_rx_rate_vht_nss;
u16 tid_seq[IEEE80211_QOS_CTL_TID_MASK + 1];
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index 1ee85c4..e6e574a 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -479,7 +479,7 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
u32 msrmnt;
u16 tid;
u8 *qc;
- int i, bin_range_count, bin_count;
+ int i, bin_range_count;
u32 *bin_ranges;
__le16 fc;
struct ieee80211_tx_latency_stat *tx_lat;
@@ -522,7 +522,6 @@ static void ieee80211_tx_latency_end_msrmnt(struct ieee80211_local *local,
/* count how many Tx frames transmitted with the appropriate latency */
bin_range_count = tx_latency->n_ranges;
bin_ranges = tx_latency->ranges;
- bin_count = tx_lat->bin_count;
for (i = 0; i < bin_range_count; i++) {
if (msrmnt <= bin_ranges[i]) {
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 4080c61..cd9f804 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -452,8 +452,7 @@ static int ieee80211_use_mfp(__le16 fc, struct sta_info *sta,
if (sta == NULL || !test_sta_flag(sta, WLAN_STA_MFP))
return 0;
- if (!ieee80211_is_robust_mgmt_frame((struct ieee80211_hdr *)
- skb->data))
+ if (!ieee80211_is_robust_mgmt_frame(skb))
return 0;
return 1;
@@ -538,11 +537,8 @@ ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx)
if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED))
return TX_CONTINUE;
- /* only deauth, disassoc and action are bufferable MMPDUs */
if (ieee80211_is_mgmt(hdr->frame_control) &&
- !ieee80211_is_deauth(hdr->frame_control) &&
- !ieee80211_is_disassoc(hdr->frame_control) &&
- !ieee80211_is_action(hdr->frame_control)) {
+ !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) {
if (tx->flags & IEEE80211_TX_UNICAST)
info->flags |= IEEE80211_TX_CTL_NO_PS_BUFFER;
return TX_CONTINUE;
@@ -582,7 +578,7 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
tx->key = key;
else if (ieee80211_is_mgmt(hdr->frame_control) &&
is_multicast_ether_addr(hdr->addr1) &&
- ieee80211_is_robust_mgmt_frame(hdr) &&
+ ieee80211_is_robust_mgmt_frame(tx->skb) &&
(key = rcu_dereference(tx->sdata->default_mgmt_key)))
tx->key = key;
else if (is_multicast_ether_addr(hdr->addr1) &&
@@ -597,12 +593,12 @@ ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx)
tx->key = NULL;
else if (tx->skb->protocol == tx->sdata->control_port_protocol)
tx->key = NULL;
- else if (ieee80211_is_robust_mgmt_frame(hdr) &&
+ else if (ieee80211_is_robust_mgmt_frame(tx->skb) &&
!(ieee80211_is_action(hdr->frame_control) &&
tx->sta && test_sta_flag(tx->sta, WLAN_STA_MFP)))
tx->key = NULL;
else if (ieee80211_is_mgmt(hdr->frame_control) &&
- !ieee80211_is_robust_mgmt_frame(hdr))
+ !ieee80211_is_robust_mgmt_frame(tx->skb))
tx->key = NULL;
else {
I802_DEBUG_INC(tx->local->tx_handlers_drop_unencrypted);
@@ -2417,15 +2413,6 @@ static int ieee80211_beacon_add_tim(struct ieee80211_sub_if_data *sdata,
return 0;
}
-void ieee80211_csa_finish(struct ieee80211_vif *vif)
-{
- struct ieee80211_sub_if_data *sdata = vif_to_sdata(vif);
-
- ieee80211_queue_work(&sdata->local->hw,
- &sdata->csa_finalize_work);
-}
-EXPORT_SYMBOL(ieee80211_csa_finish);
-
static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
struct beacon_data *beacon)
{
@@ -2454,8 +2441,12 @@ static void ieee80211_update_csa(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(counter_offset_beacon >= beacon_data_len))
return;
- /* warn if the driver did not check for/react to csa completeness */
- if (WARN_ON(beacon_data[counter_offset_beacon] == 0))
+ /* Warn if the driver did not check for/react to csa
+ * completeness. A beacon with CSA counter set to 0 should
+ * never occur, because a counter of 1 means switch just
+ * before the next beacon.
+ */
+ if (WARN_ON(beacon_data[counter_offset_beacon] == 1))
return;
beacon_data[counter_offset_beacon]--;
@@ -2521,7 +2512,7 @@ bool ieee80211_csa_is_complete(struct ieee80211_vif *vif)
if (WARN_ON(counter_beacon > beacon_data_len))
goto out;
- if (beacon_data[counter_beacon] == 0)
+ if (beacon_data[counter_beacon] == 1)
ret = true;
out:
rcu_read_unlock();
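With the counter semantics above, a transmitted beacon never carries a CSA count of 0; drivers that build beacons in software poll for completion and then finalize the switch. A minimal sketch of that flow, assuming the usual beacon-get path; example_beacon_tasklet() is illustrative, not a driver from this patch:
static void example_beacon_tasklet(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct sk_buff *beacon;

	if (vif->csa_active && ieee80211_csa_is_complete(vif)) {
		ieee80211_csa_finish(vif);
		return;
	}

	beacon = ieee80211_beacon_get(hw, vif);
	if (!beacon)
		return;

	/* ... hand the beacon to the hardware ... */
	dev_kfree_skb_any(beacon);
}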
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index b8700d4..275c94f 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -34,7 +34,7 @@
#include "wep.h"
/* privid for wiphys to determine whether they belong to us or not */
-void *mac80211_wiphy_privid = &mac80211_wiphy_privid;
+const void *const mac80211_wiphy_privid = &mac80211_wiphy_privid;
struct ieee80211_hw *wiphy_to_ieee80211_hw(struct wiphy *wiphy)
{
@@ -1277,13 +1277,32 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
* that calculates local->scan_ies_len.
*/
- /* add any remaining custom IEs */
+ /* insert custom IEs that go before VHT */
if (ie && ie_len) {
- noffset = ie_len;
+ static const u8 before_vht[] = {
+ WLAN_EID_SSID,
+ WLAN_EID_SUPP_RATES,
+ WLAN_EID_REQUEST,
+ WLAN_EID_EXT_SUPP_RATES,
+ WLAN_EID_DS_PARAMS,
+ WLAN_EID_SUPPORTED_REGULATORY_CLASSES,
+ WLAN_EID_HT_CAPABILITY,
+ WLAN_EID_BSS_COEX_2040,
+ WLAN_EID_EXT_CAPABILITY,
+ WLAN_EID_SSID_LIST,
+ WLAN_EID_CHANNEL_USAGE,
+ WLAN_EID_INTERWORKING,
+ /* mesh ID can't happen here */
+ /* 60 GHz can't happen here right now */
+ };
+ noffset = ieee80211_ie_split(ie, ie_len,
+ before_vht, ARRAY_SIZE(before_vht),
+ offset);
if (end - pos < noffset - offset)
goto out_err;
memcpy(pos, ie + offset, noffset - offset);
pos += noffset - offset;
+ offset = noffset;
}
if (sband->vht_cap.vht_supported) {
@@ -1293,6 +1312,15 @@ int ieee80211_build_preq_ies(struct ieee80211_local *local, u8 *buffer,
sband->vht_cap.cap);
}
+ /* add any remaining custom IEs */
+ if (ie && ie_len) {
+ noffset = ie_len;
+ if (end - pos < noffset - offset)
+ goto out_err;
+ memcpy(pos, ie + offset, noffset - offset);
+ pos += noffset - offset;
+ }
+
return pos - buffer;
out_err:
WARN_ONCE(1, "not enough space for preq IEs\n");
@@ -1370,7 +1398,6 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
enum ieee80211_band band, u32 *basic_rates)
{
struct ieee80211_supported_band *sband;
- struct ieee80211_rate *bitrates;
size_t num_rates;
u32 supp_rates, rate_flags;
int i, j, shift;
@@ -1382,7 +1409,6 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata,
if (WARN_ON(!sband))
return 1;
- bitrates = sband->bitrates;
num_rates = sband->n_bitrates;
supp_rates = 0;
for (i = 0; i < elems->supp_rates_len +
@@ -2268,11 +2294,11 @@ u64 ieee80211_calculate_rx_timestamp(struct ieee80211_local *local,
ri.nss = status->vht_nss;
if (status->flag & RX_FLAG_40MHZ)
ri.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH;
- if (status->flag & RX_FLAG_80MHZ)
+ if (status->vht_flag & RX_VHT_FLAG_80MHZ)
ri.flags |= RATE_INFO_FLAGS_80_MHZ_WIDTH;
- if (status->flag & RX_FLAG_80P80MHZ)
+ if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
ri.flags |= RATE_INFO_FLAGS_80P80_MHZ_WIDTH;
- if (status->flag & RX_FLAG_160MHZ)
+ if (status->vht_flag & RX_VHT_FLAG_160MHZ)
ri.flags |= RATE_INFO_FLAGS_160_MHZ_WIDTH;
if (status->flag & RX_FLAG_SHORT_GI)
ri.flags |= RATE_INFO_FLAGS_SHORT_GI;
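The IE splitting added above relies on ieee80211_ie_split() returning the offset of the first user-supplied IE whose ID is not in the "before" set, so the [offset, noffset) chunk is exactly what belongs in front of the VHT IE. A minimal usage sketch; the element list and helper name are illustrative, not from the patch:
static const u8 example_before_vht[] = {
	WLAN_EID_SSID,
	WLAN_EID_SUPP_RATES,
	WLAN_EID_EXT_SUPP_RATES,
	WLAN_EID_HT_CAPABILITY,
};

/* copies the pre-VHT chunk and returns the new offset; the caller
 * appends the remainder after the VHT IE
 */
static size_t example_copy_pre_vht_ies(u8 *pos, const u8 *ie, size_t ie_len,
				       size_t offset)
{
	size_t noffset;

	noffset = ieee80211_ie_split(ie, ie_len, example_before_vht,
				     ARRAY_SIZE(example_before_vht), offset);
	memcpy(pos, ie + offset, noffset - offset);
	return noffset;
}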
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index d75f35c..e9e36a2 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -349,9 +349,9 @@ void ieee80211_sta_set_rx_nss(struct sta_info *sta)
sta->sta.rx_nss = max_t(u8, 1, ht_rx_nss);
}
-void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
- struct sta_info *sta, u8 opmode,
- enum ieee80211_band band, bool nss_only)
+u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, u8 opmode,
+ enum ieee80211_band band, bool nss_only)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_supported_band *sband;
@@ -363,7 +363,7 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
/* ignore - no support for BF yet */
if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
- return;
+ return 0;
nss = opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_MASK;
nss >>= IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT;
@@ -375,7 +375,7 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
}
if (nss_only)
- goto change;
+ return changed;
switch (opmode & IEEE80211_OPMODE_NOTIF_CHANWIDTH_MASK) {
case IEEE80211_OPMODE_NOTIF_CHANWIDTH_20MHZ:
@@ -398,7 +398,19 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
changed |= IEEE80211_RC_BW_CHANGED;
}
- change:
- if (changed)
+ return changed;
+}
+
+void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
+ struct sta_info *sta, u8 opmode,
+ enum ieee80211_band band, bool nss_only)
+{
+ struct ieee80211_local *local = sdata->local;
+ struct ieee80211_supported_band *sband = local->hw.wiphy->bands[band];
+
+ u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode,
+ band, nss_only);
+
+ if (changed > 0)
rate_control_rate_update(local, sband, sta, changed);
}
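Splitting out __ieee80211_vht_handle_opmode() lets a caller apply an operating-mode notification and decide for itself when to push the result to rate control. A minimal sketch of such a caller; example_apply_opmode() is illustrative only:
static void example_apply_opmode(struct ieee80211_sub_if_data *sdata,
				 struct sta_info *sta, u8 opmode,
				 enum ieee80211_band band)
{
	u32 changed;

	changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode,
						band, true /* nss_only */);

	/* ... finish other station setup first ... */

	if (changed)
		rate_control_rate_update(sdata->local,
					 sdata->local->hw.wiphy->bands[band],
					 sta, changed);
}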
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 21448d6..b8600e3 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -301,8 +301,7 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
}
-static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad,
- int encrypted)
+static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *b_0, u8 *aad)
{
__le16 mask_fc;
int a4_included, mgmt;
@@ -456,7 +455,7 @@ static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb)
return 0;
pos += IEEE80211_CCMP_HDR_LEN;
- ccmp_special_blocks(skb, pn, b_0, aad, 0);
+ ccmp_special_blocks(skb, pn, b_0, aad);
ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, b_0, aad, pos, len,
skb_put(skb, IEEE80211_CCMP_MIC_LEN));
@@ -495,7 +494,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
hdrlen = ieee80211_hdrlen(hdr->frame_control);
if (!ieee80211_is_data(hdr->frame_control) &&
- !ieee80211_is_robust_mgmt_frame(hdr))
+ !ieee80211_is_robust_mgmt_frame(skb))
return RX_CONTINUE;
data_len = skb->len - hdrlen - IEEE80211_CCMP_HDR_LEN -
@@ -524,7 +523,7 @@ ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx)
u8 aad[2 * AES_BLOCK_SIZE];
u8 b_0[AES_BLOCK_SIZE];
/* hardware didn't decrypt/verify MIC */
- ccmp_special_blocks(skb, pn, b_0, aad, 1);
+ ccmp_special_blocks(skb, pn, b_0, aad);
if (ieee80211_aes_ccm_decrypt(
key->u.ccmp.tfm, b_0, aad,
diff --git a/net/mac802154/ieee802154_dev.c b/net/mac802154/ieee802154_dev.c
index 52ae664..b75bb01 100644
--- a/net/mac802154/ieee802154_dev.c
+++ b/net/mac802154/ieee802154_dev.c
@@ -165,6 +165,67 @@ err:
return ERR_PTR(err);
}
+static int mac802154_set_txpower(struct wpan_phy *phy, int db)
+{
+ struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+ if (!priv->ops->set_txpower)
+ return -ENOTSUPP;
+
+ return priv->ops->set_txpower(&priv->hw, db);
+}
+
+static int mac802154_set_lbt(struct wpan_phy *phy, bool on)
+{
+ struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+ if (!priv->ops->set_lbt)
+ return -ENOTSUPP;
+
+ return priv->ops->set_lbt(&priv->hw, on);
+}
+
+static int mac802154_set_cca_mode(struct wpan_phy *phy, u8 mode)
+{
+ struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+ if (!priv->ops->set_cca_mode)
+ return -ENOTSUPP;
+
+ return priv->ops->set_cca_mode(&priv->hw, mode);
+}
+
+static int mac802154_set_cca_ed_level(struct wpan_phy *phy, s32 level)
+{
+ struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+ if (!priv->ops->set_cca_ed_level)
+ return -ENOTSUPP;
+
+ return priv->ops->set_cca_ed_level(&priv->hw, level);
+}
+
+static int mac802154_set_csma_params(struct wpan_phy *phy, u8 min_be,
+ u8 max_be, u8 retries)
+{
+ struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+ if (!priv->ops->set_csma_params)
+ return -ENOTSUPP;
+
+ return priv->ops->set_csma_params(&priv->hw, min_be, max_be, retries);
+}
+
+static int mac802154_set_frame_retries(struct wpan_phy *phy, s8 retries)
+{
+ struct mac802154_priv *priv = wpan_phy_priv(phy);
+
+ if (!priv->ops->set_frame_retries)
+ return -ENOTSUPP;
+
+ return priv->ops->set_frame_retries(&priv->hw, retries);
+}
+
struct ieee802154_dev *
ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops)
{
@@ -242,6 +303,12 @@ int ieee802154_register_device(struct ieee802154_dev *dev)
priv->phy->add_iface = mac802154_add_iface;
priv->phy->del_iface = mac802154_del_iface;
+ priv->phy->set_txpower = mac802154_set_txpower;
+ priv->phy->set_lbt = mac802154_set_lbt;
+ priv->phy->set_cca_mode = mac802154_set_cca_mode;
+ priv->phy->set_cca_ed_level = mac802154_set_cca_ed_level;
+ priv->phy->set_csma_params = mac802154_set_csma_params;
+ priv->phy->set_frame_retries = mac802154_set_frame_retries;
rc = wpan_phy_register(priv->phy);
if (rc < 0)
diff --git a/net/mac802154/mib.c b/net/mac802154/mib.c
index 8ded97c..f48f40c1 100644
--- a/net/mac802154/mib.c
+++ b/net/mac802154/mib.c
@@ -62,8 +62,6 @@ static void hw_addr_notify(struct work_struct *work)
pr_debug("failed changed mask %lx\n", nw->changed);
kfree(nw);
-
- return;
}
static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
@@ -79,8 +77,6 @@ static void set_hw_addr_filt(struct net_device *dev, unsigned long changed)
work->dev = dev;
work->changed = changed;
queue_work(priv->hw->dev_workqueue, &work->work);
-
- return;
}
void mac802154_dev_set_short_addr(struct net_device *dev, u16 val)
diff --git a/net/mac802154/rx.c b/net/mac802154/rx.c
index 38548ec..03855b0 100644
--- a/net/mac802154/rx.c
+++ b/net/mac802154/rx.c
@@ -80,7 +80,6 @@ mac802154_subif_rx(struct ieee802154_dev *hw, struct sk_buff *skb, u8 lqi)
mac802154_wpans_rx(priv, skb);
out:
dev_kfree_skb(skb);
- return;
}
static void mac802154_rx_worker(struct work_struct *work)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 04748ab6..9db8bab 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1460,7 +1460,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
if (nlk->netlink_bind && nlk->groups[0]) {
int i;
- for (i=0; i<nlk->ngroups; i++) {
+ for (i = 0; i < nlk->ngroups; i++) {
if (test_bit(i, nlk->groups))
nlk->netlink_bind(i);
}
@@ -2549,7 +2549,7 @@ __nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int fla
struct nlmsghdr *nlh;
int size = nlmsg_msg_size(len);
- nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
+ nlh = (struct nlmsghdr *)skb_put(skb, NLMSG_ALIGN(size));
nlh->nlmsg_type = type;
nlh->nlmsg_len = size;
nlh->nlmsg_flags = flags;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index e9a48ba..36f8872 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -256,10 +256,10 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
out:
/* Update datapath statistics. */
- u64_stats_update_begin(&stats->sync);
+ u64_stats_update_begin(&stats->syncp);
(*stats_counter)++;
stats->n_mask_hit += n_mask_hit;
- u64_stats_update_end(&stats->sync);
+ u64_stats_update_end(&stats->syncp);
}
static struct genl_family dp_packet_genl_family = {
@@ -295,9 +295,9 @@ int ovs_dp_upcall(struct datapath *dp, struct sk_buff *skb,
err:
stats = this_cpu_ptr(dp->stats_percpu);
- u64_stats_update_begin(&stats->sync);
+ u64_stats_update_begin(&stats->syncp);
stats->n_lost++;
- u64_stats_update_end(&stats->sync);
+ u64_stats_update_end(&stats->syncp);
return err;
}
@@ -606,9 +606,9 @@ static void get_dp_stats(struct datapath *dp, struct ovs_dp_stats *stats,
percpu_stats = per_cpu_ptr(dp->stats_percpu, i);
do {
- start = u64_stats_fetch_begin_bh(&percpu_stats->sync);
+ start = u64_stats_fetch_begin_bh(&percpu_stats->syncp);
local_stats = *percpu_stats;
- } while (u64_stats_fetch_retry_bh(&percpu_stats->sync, start));
+ } while (u64_stats_fetch_retry_bh(&percpu_stats->syncp, start));
stats->n_hit += local_stats.n_hit;
stats->n_missed += local_stats.n_missed;
@@ -1215,18 +1215,12 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
if (err)
goto err_free_dp;
- dp->stats_percpu = alloc_percpu(struct dp_stats_percpu);
+ dp->stats_percpu = netdev_alloc_pcpu_stats(struct dp_stats_percpu);
if (!dp->stats_percpu) {
err = -ENOMEM;
goto err_destroy_table;
}
- for_each_possible_cpu(i) {
- struct dp_stats_percpu *dpath_stats;
- dpath_stats = per_cpu_ptr(dp->stats_percpu, i);
- u64_stats_init(&dpath_stats->sync);
- }
-
dp->ports = kmalloc(DP_VPORT_HASH_BUCKETS * sizeof(struct hlist_head),
GFP_KERNEL);
if (!dp->ports) {
diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h
index 6be9fbb..0531738 100644
--- a/net/openvswitch/datapath.h
+++ b/net/openvswitch/datapath.h
@@ -55,7 +55,7 @@ struct dp_stats_percpu {
u64 n_missed;
u64 n_lost;
u64 n_mask_hit;
- struct u64_stats_sync sync;
+ struct u64_stats_sync syncp;
};
/**
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 208dd9a..3b4db32 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -121,7 +121,6 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
{
struct vport *vport;
size_t alloc_size;
- int i;
alloc_size = sizeof(struct vport);
if (priv_size) {
@@ -139,19 +138,12 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
vport->ops = ops;
INIT_HLIST_NODE(&vport->dp_hash_node);
- vport->percpu_stats = alloc_percpu(struct pcpu_sw_netstats);
+ vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!vport->percpu_stats) {
kfree(vport);
return ERR_PTR(-ENOMEM);
}
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *vport_stats;
- vport_stats = per_cpu_ptr(vport->percpu_stats, i);
- u64_stats_init(&vport_stats->syncp);
- }
-
-
spin_lock_init(&vport->stats_lock);
return vport;
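Both openvswitch conversions above replace the open-coded alloc_percpu() plus per-CPU u64_stats_init() loop with netdev_alloc_pcpu_stats(), which initializes each syncp internally. A minimal sketch of the allocation and update pattern on a made-up stats struct; the names are illustrative:
struct example_pcpu_stats {
	u64			rx_packets;
	struct u64_stats_sync	syncp;
};

static struct example_pcpu_stats __percpu *example_alloc_stats(void)
{
	/* allocates the per-CPU array and runs u64_stats_init() on each syncp */
	return netdev_alloc_pcpu_stats(struct example_pcpu_stats);
}

static void example_count_rx(struct example_pcpu_stats __percpu *stats)
{
	struct example_pcpu_stats *s = this_cpu_ptr(stats);

	u64_stats_update_begin(&s->syncp);
	s->rx_packets++;
	u64_stats_update_end(&s->syncp);
}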
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 48a6a93..2923044 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2257,8 +2257,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
if (unlikely(!(dev->flags & IFF_UP)))
goto out_put;
- reserve = dev->hard_header_len;
-
+ reserve = dev->hard_header_len + VLAN_HLEN;
size_max = po->tx_ring.frame_size
- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
@@ -2285,8 +2284,19 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
goto out_status;
tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
- addr, hlen);
+ addr, hlen);
+ if (tp_len > dev->mtu + dev->hard_header_len) {
+ struct ethhdr *ehdr;
+ /* Earlier code assumed this would be a VLAN pkt,
+ * double-check this now that we have the actual
+ * packet in hand.
+ */
+ skb_reset_mac_header(skb);
+ ehdr = eth_hdr(skb);
+ if (ehdr->h_proto != htons(ETH_P_8021Q))
+ tp_len = -EMSGSIZE;
+ }
if (unlikely(tp_len < 0)) {
if (po->tp_loss) {
__packet_set_status(po, ph,
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index ed7e0b4..b3b16c0 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -789,7 +789,8 @@ void rfkill_resume_polling(struct rfkill *rfkill)
if (!rfkill->ops->poll)
return;
- schedule_work(&rfkill->poll_work.work);
+ queue_delayed_work(system_power_efficient_wq,
+ &rfkill->poll_work, 0);
}
EXPORT_SYMBOL(rfkill_resume_polling);
@@ -894,7 +895,8 @@ static void rfkill_poll(struct work_struct *work)
*/
rfkill->ops->poll(rfkill, rfkill->data);
- schedule_delayed_work(&rfkill->poll_work,
+ queue_delayed_work(system_power_efficient_wq,
+ &rfkill->poll_work,
round_jiffies_relative(POLL_INTERVAL));
}
@@ -958,7 +960,8 @@ int __must_check rfkill_register(struct rfkill *rfkill)
INIT_WORK(&rfkill->sync_work, rfkill_sync_work);
if (rfkill->ops->poll)
- schedule_delayed_work(&rfkill->poll_work,
+ queue_delayed_work(system_power_efficient_wq,
+ &rfkill->poll_work,
round_jiffies_relative(POLL_INTERVAL));
if (!rfkill->persistent || rfkill_epo_lock_active) {
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 72bdc71..8a5ba5a 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -27,8 +27,11 @@
#include <net/act_api.h>
#include <net/netlink.h>
-void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
+void tcf_hash_destroy(struct tc_action *a)
{
+ struct tcf_common *p = a->priv;
+ struct tcf_hashinfo *hinfo = a->ops->hinfo;
+
spin_lock_bh(&hinfo->lock);
hlist_del(&p->tcfc_head);
spin_unlock_bh(&hinfo->lock);
@@ -42,18 +45,22 @@ void tcf_hash_destroy(struct tcf_common *p, struct tcf_hashinfo *hinfo)
}
EXPORT_SYMBOL(tcf_hash_destroy);
-int tcf_hash_release(struct tcf_common *p, int bind,
- struct tcf_hashinfo *hinfo)
+int tcf_hash_release(struct tc_action *a, int bind)
{
+ struct tcf_common *p = a->priv;
int ret = 0;
if (p) {
if (bind)
p->tcfc_bindcnt--;
+ else if (p->tcfc_bindcnt > 0)
+ return -EPERM;
p->tcfc_refcnt--;
if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
- tcf_hash_destroy(p, hinfo);
+ if (a->ops->cleanup)
+ a->ops->cleanup(a, bind);
+ tcf_hash_destroy(a);
ret = 1;
}
}
@@ -118,6 +125,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
struct tcf_common *p;
struct nlattr *nest;
int i = 0, n_i = 0;
+ int ret = -EINVAL;
nest = nla_nest_start(skb, a->order);
if (nest == NULL)
@@ -127,10 +135,13 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
for (i = 0; i < (hinfo->hmask + 1); i++) {
head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
hlist_for_each_entry_safe(p, n, head, tcfc_head) {
- if (ACT_P_DELETED == tcf_hash_release(p, 0, hinfo)) {
+ a->priv = p;
+ ret = tcf_hash_release(a, 0);
+ if (ret == ACT_P_DELETED) {
module_put(a->ops->owner);
n_i++;
- }
+ } else if (ret < 0)
+ goto nla_put_failure;
}
}
if (nla_put_u32(skb, TCA_FCNT, n_i))
@@ -140,7 +151,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
return n_i;
nla_put_failure:
nla_nest_cancel(skb, nest);
- return -EINVAL;
+ return ret;
}
static int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
@@ -198,7 +209,7 @@ int tcf_hash_search(struct tc_action *a, u32 index)
}
EXPORT_SYMBOL(tcf_hash_search);
-struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind)
+int tcf_hash_check(u32 index, struct tc_action *a, int bind)
{
struct tcf_hashinfo *hinfo = a->ops->hinfo;
struct tcf_common *p = NULL;
@@ -207,19 +218,30 @@ struct tcf_common *tcf_hash_check(u32 index, struct tc_action *a, int bind)
p->tcfc_bindcnt++;
p->tcfc_refcnt++;
a->priv = p;
+ return 1;
}
- return p;
+ return 0;
}
EXPORT_SYMBOL(tcf_hash_check);
-struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
- struct tc_action *a, int size, int bind)
+void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
+{
+ struct tcf_common *pc = a->priv;
+ if (est)
+ gen_kill_estimator(&pc->tcfc_bstats,
+ &pc->tcfc_rate_est);
+ kfree_rcu(pc, tcfc_rcu);
+}
+EXPORT_SYMBOL(tcf_hash_cleanup);
+
+int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
+ int size, int bind)
{
struct tcf_hashinfo *hinfo = a->ops->hinfo;
struct tcf_common *p = kzalloc(size, GFP_KERNEL);
if (unlikely(!p))
- return ERR_PTR(-ENOMEM);
+ return -ENOMEM;
p->tcfc_refcnt = 1;
if (bind)
p->tcfc_bindcnt = 1;
@@ -234,17 +256,19 @@ struct tcf_common *tcf_hash_create(u32 index, struct nlattr *est,
&p->tcfc_lock, est);
if (err) {
kfree(p);
- return ERR_PTR(err);
+ return err;
}
}
a->priv = (void *) p;
- return p;
+ return 0;
}
EXPORT_SYMBOL(tcf_hash_create);
-void tcf_hash_insert(struct tcf_common *p, struct tcf_hashinfo *hinfo)
+void tcf_hash_insert(struct tc_action *a)
{
+ struct tcf_common *p = a->priv;
+ struct tcf_hashinfo *hinfo = a->ops->hinfo;
unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);
spin_lock_bh(&hinfo->lock);
@@ -256,12 +280,13 @@ EXPORT_SYMBOL(tcf_hash_insert);
static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);
-int tcf_register_action(struct tc_action_ops *act)
+int tcf_register_action(struct tc_action_ops *act, unsigned int mask)
{
struct tc_action_ops *a;
+ int err;
- /* Must supply act, dump, cleanup and init */
- if (!act->act || !act->dump || !act->cleanup || !act->init)
+ /* Must supply act, dump and init */
+ if (!act->act || !act->dump || !act->init)
return -EINVAL;
/* Supply defaults */
@@ -270,10 +295,21 @@ int tcf_register_action(struct tc_action_ops *act)
if (!act->walk)
act->walk = tcf_generic_walker;
+ act->hinfo = kmalloc(sizeof(struct tcf_hashinfo), GFP_KERNEL);
+ if (!act->hinfo)
+ return -ENOMEM;
+ err = tcf_hashinfo_init(act->hinfo, mask);
+ if (err) {
+ kfree(act->hinfo);
+ return err;
+ }
+
write_lock(&act_mod_lock);
list_for_each_entry(a, &act_base, head) {
if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
write_unlock(&act_mod_lock);
+ tcf_hashinfo_destroy(act->hinfo);
+ kfree(act->hinfo);
return -EEXIST;
}
}
@@ -292,6 +328,8 @@ int tcf_unregister_action(struct tc_action_ops *act)
list_for_each_entry(a, &act_base, head) {
if (a == act) {
list_del(&act->head);
+ tcf_hashinfo_destroy(act->hinfo);
+ kfree(act->hinfo);
err = 0;
break;
}
@@ -368,16 +406,21 @@ exec_done:
}
EXPORT_SYMBOL(tcf_action_exec);
-void tcf_action_destroy(struct list_head *actions, int bind)
+int tcf_action_destroy(struct list_head *actions, int bind)
{
struct tc_action *a, *tmp;
+ int ret = 0;
list_for_each_entry_safe(a, tmp, actions, list) {
- if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
+ ret = tcf_hash_release(a, bind);
+ if (ret == ACT_P_DELETED)
module_put(a->ops->owner);
+ else if (ret < 0)
+ return ret;
list_del(&a->list);
kfree(a);
}
+ return ret;
}
int
@@ -642,6 +685,20 @@ act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
return rtnl_unicast(skb, net, portid);
}
+static struct tc_action *create_a(int i)
+{
+ struct tc_action *act;
+
+ act = kzalloc(sizeof(*act), GFP_KERNEL);
+ if (act == NULL) {
+ pr_debug("create_a: failed to alloc!\n");
+ return NULL;
+ }
+ act->order = i;
+ INIT_LIST_HEAD(&act->list);
+ return act;
+}
+
static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
{
@@ -661,11 +718,10 @@ tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
index = nla_get_u32(tb[TCA_ACT_INDEX]);
err = -ENOMEM;
- a = kzalloc(sizeof(struct tc_action), GFP_KERNEL);
+ a = create_a(0);
if (a == NULL)
goto err_out;
- INIT_LIST_HEAD(&a->list);
err = -EINVAL;
a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
if (a->ops == NULL) /* could happen in batch of actions */
@@ -695,20 +751,6 @@ static void cleanup_a(struct list_head *actions)
}
}
-static struct tc_action *create_a(int i)
-{
- struct tc_action *act;
-
- act = kzalloc(sizeof(*act), GFP_KERNEL);
- if (act == NULL) {
- pr_debug("create_a: failed to alloc!\n");
- return NULL;
- }
- act->order = i;
- INIT_LIST_HEAD(&act->list);
- return act;
-}
-
static int tca_action_flush(struct net *net, struct nlattr *nla,
struct nlmsghdr *n, u32 portid)
{
@@ -720,18 +762,12 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
struct nlattr *nest;
struct nlattr *tb[TCA_ACT_MAX + 1];
struct nlattr *kind;
- struct tc_action *a = create_a(0);
+ struct tc_action a;
int err = -ENOMEM;
- if (a == NULL) {
- pr_debug("tca_action_flush: couldnt create tc_action\n");
- return err;
- }
-
skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb) {
pr_debug("tca_action_flush: failed skb alloc\n");
- kfree(a);
return err;
}
@@ -743,8 +779,10 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
err = -EINVAL;
kind = tb[TCA_ACT_KIND];
- a->ops = tc_lookup_action(kind);
- if (a->ops == NULL) /*some idjot trying to flush unknown action */
+ memset(&a, 0, sizeof(struct tc_action));
+ INIT_LIST_HEAD(&a.list);
+ a.ops = tc_lookup_action(kind);
+ if (a.ops == NULL) /*some idjot trying to flush unknown action */
goto err_out;
nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
@@ -759,7 +797,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
if (nest == NULL)
goto out_module_put;
- err = a->ops->walk(skb, &dcb, RTM_DELACTION, a);
+ err = a.ops->walk(skb, &dcb, RTM_DELACTION, &a);
if (err < 0)
goto out_module_put;
if (err == 0)
@@ -769,8 +807,7 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
nlh->nlmsg_len = skb_tail_pointer(skb) - b;
nlh->nlmsg_flags |= NLM_F_ROOT;
- module_put(a->ops->owner);
- kfree(a);
+ module_put(a.ops->owner);
err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
if (err > 0)
@@ -779,11 +816,10 @@ static int tca_action_flush(struct net *net, struct nlattr *nla,
return err;
out_module_put:
- module_put(a->ops->owner);
+ module_put(a.ops->owner);
err_out:
noflush_out:
kfree_skb(skb);
- kfree(a);
return err;
}
@@ -805,7 +841,11 @@ tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
}
/* now do the delete */
- tcf_action_destroy(actions, 0);
+ ret = tcf_action_destroy(actions, 0);
+ if (ret < 0) {
+ kfree_skb(skb);
+ return ret;
+ }
ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
n->nlmsg_flags & NLM_F_ECHO);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 2210187..edbf40d 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -37,7 +37,6 @@
#include <net/tc_act/tc_csum.h>
#define CSUM_TAB_MASK 15
-static struct tcf_hashinfo csum_hash_info;
static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = {
[TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), },
@@ -48,7 +47,6 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
{
struct nlattr *tb[TCA_CSUM_MAX + 1];
struct tc_csum *parm;
- struct tcf_common *pc;
struct tcf_csum *p;
int ret = 0, err;
@@ -63,38 +61,31 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
return -EINVAL;
parm = nla_data(tb[TCA_CSUM_PARMS]);
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
- pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ if (!tcf_hash_check(parm->index, a, bind)) {
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+ if (ret)
+ return ret;
ret = ACT_P_CREATED;
} else {
if (bind)/* dont override defaults */
return 0;
- tcf_hash_release(pc, bind, a->ops->hinfo);
+ tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
}
- p = to_tcf_csum(pc);
+ p = to_tcf_csum(a);
spin_lock_bh(&p->tcf_lock);
p->tcf_action = parm->action;
p->update_flags = parm->update_flags;
spin_unlock_bh(&p->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
}
-static int tcf_csum_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_csum *p = a->priv;
- return tcf_hash_release(&p->common, bind, &csum_hash_info);
-}
-
/**
* tcf_csum_skb_nextlayer - Get next layer pointer
* @skb: sk_buff to use
@@ -569,12 +560,10 @@ nla_put_failure:
static struct tc_action_ops act_csum_ops = {
.kind = "csum",
- .hinfo = &csum_hash_info,
.type = TCA_ACT_CSUM,
.owner = THIS_MODULE,
.act = tcf_csum,
.dump = tcf_csum_dump,
- .cleanup = tcf_csum_cleanup,
.init = tcf_csum_init,
};
@@ -583,11 +572,7 @@ MODULE_LICENSE("GPL");
static int __init csum_init_module(void)
{
- int err = tcf_hashinfo_init(&csum_hash_info, CSUM_TAB_MASK);
- if (err)
- return err;
-
- return tcf_register_action(&act_csum_ops);
+ return tcf_register_action(&act_csum_ops, CSUM_TAB_MASK);
}
static void __exit csum_cleanup_module(void)
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index a0eed30..d6bcbd9 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -24,7 +24,6 @@
#include <net/tc_act/tc_gact.h>
#define GACT_TAB_MASK 15
-static struct tcf_hashinfo gact_hash_info;
#ifdef CONFIG_GACT_PROB
static int gact_net_rand(struct tcf_gact *gact)
@@ -57,7 +56,6 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
struct nlattr *tb[TCA_GACT_MAX + 1];
struct tc_gact *parm;
struct tcf_gact *gact;
- struct tcf_common *pc;
int ret = 0;
int err;
#ifdef CONFIG_GACT_PROB
@@ -86,21 +84,20 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
}
#endif
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
- pc = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ if (!tcf_hash_check(parm->index, a, bind)) {
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*gact), bind);
+ if (ret)
+ return ret;
ret = ACT_P_CREATED;
} else {
if (bind) /* don't override defaults */
return 0;
- tcf_hash_release(pc, bind, a->ops->hinfo);
+ tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
}
- gact = to_gact(pc);
+ gact = to_gact(a);
spin_lock_bh(&gact->tcf_lock);
gact->tcf_action = parm->action;
@@ -113,19 +110,10 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
#endif
spin_unlock_bh(&gact->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
}
-static int tcf_gact_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_gact *gact = a->priv;
-
- if (gact)
- return tcf_hash_release(&gact->common, bind, a->ops->hinfo);
- return 0;
-}
-
static int tcf_gact(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
@@ -191,12 +179,10 @@ nla_put_failure:
static struct tc_action_ops act_gact_ops = {
.kind = "gact",
- .hinfo = &gact_hash_info,
.type = TCA_ACT_GACT,
.owner = THIS_MODULE,
.act = tcf_gact,
.dump = tcf_gact_dump,
- .cleanup = tcf_gact_cleanup,
.init = tcf_gact_init,
};
@@ -206,21 +192,17 @@ MODULE_LICENSE("GPL");
static int __init gact_init_module(void)
{
- int err = tcf_hashinfo_init(&gact_hash_info, GACT_TAB_MASK);
- if (err)
- return err;
#ifdef CONFIG_GACT_PROB
pr_info("GACT probability on\n");
#else
pr_info("GACT probability NOT on\n");
#endif
- return tcf_register_action(&act_gact_ops);
+ return tcf_register_action(&act_gact_ops, GACT_TAB_MASK);
}
static void __exit gact_cleanup_module(void)
{
tcf_unregister_action(&act_gact_ops);
- tcf_hashinfo_destroy(&gact_hash_info);
}
module_init(gact_init_module);
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 0a6d621..8a64a07 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -29,7 +29,6 @@
#define IPT_TAB_MASK 15
-static struct tcf_hashinfo ipt_hash_info;
static int ipt_init_target(struct xt_entry_target *t, char *table, unsigned int hook)
{
@@ -69,22 +68,12 @@ static void ipt_destroy_target(struct xt_entry_target *t)
module_put(par.target->me);
}
-static int tcf_ipt_release(struct tcf_ipt *ipt, int bind)
+static void tcf_ipt_release(struct tc_action *a, int bind)
{
- int ret = 0;
- if (ipt) {
- if (bind)
- ipt->tcf_bindcnt--;
- ipt->tcf_refcnt--;
- if (ipt->tcf_bindcnt <= 0 && ipt->tcf_refcnt <= 0) {
- ipt_destroy_target(ipt->tcfi_t);
- kfree(ipt->tcfi_tname);
- kfree(ipt->tcfi_t);
- tcf_hash_destroy(&ipt->common, &ipt_hash_info);
- ret = ACT_P_DELETED;
- }
- }
- return ret;
+ struct tcf_ipt *ipt = to_ipt(a);
+ ipt_destroy_target(ipt->tcfi_t);
+ kfree(ipt->tcfi_tname);
+ kfree(ipt->tcfi_t);
}
static const struct nla_policy ipt_policy[TCA_IPT_MAX + 1] = {
@@ -99,7 +88,6 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
{
struct nlattr *tb[TCA_IPT_MAX + 1];
struct tcf_ipt *ipt;
- struct tcf_common *pc;
struct xt_entry_target *td, *t;
char *tname;
int ret = 0, err;
@@ -125,21 +113,20 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
if (tb[TCA_IPT_INDEX] != NULL)
index = nla_get_u32(tb[TCA_IPT_INDEX]);
- pc = tcf_hash_check(index, a, bind);
- if (!pc) {
- pc = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ if (!tcf_hash_check(index, a, bind)) {
+ ret = tcf_hash_create(index, est, a, sizeof(*ipt), bind);
+ if (ret)
+ return ret;
ret = ACT_P_CREATED;
} else {
if (bind) /* don't override defaults */
return 0;
- tcf_ipt_release(to_ipt(pc), bind);
+ tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
}
- ipt = to_ipt(pc);
+ ipt = to_ipt(a);
hook = nla_get_u32(tb[TCA_IPT_HOOK]);
@@ -170,7 +157,7 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
ipt->tcfi_hook = hook;
spin_unlock_bh(&ipt->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
err3:
@@ -178,21 +165,11 @@ err3:
err2:
kfree(tname);
err1:
- if (ret == ACT_P_CREATED) {
- if (est)
- gen_kill_estimator(&pc->tcfc_bstats,
- &pc->tcfc_rate_est);
- kfree_rcu(pc, tcfc_rcu);
- }
+ if (ret == ACT_P_CREATED)
+ tcf_hash_cleanup(a, est);
return err;
}
-static int tcf_ipt_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_ipt *ipt = a->priv;
- return tcf_ipt_release(ipt, bind);
-}
-
static int tcf_ipt(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
@@ -284,23 +261,21 @@ nla_put_failure:
static struct tc_action_ops act_ipt_ops = {
.kind = "ipt",
- .hinfo = &ipt_hash_info,
.type = TCA_ACT_IPT,
.owner = THIS_MODULE,
.act = tcf_ipt,
.dump = tcf_ipt_dump,
- .cleanup = tcf_ipt_cleanup,
+ .cleanup = tcf_ipt_release,
.init = tcf_ipt_init,
};
static struct tc_action_ops act_xt_ops = {
.kind = "xt",
- .hinfo = &ipt_hash_info,
.type = TCA_ACT_XT,
.owner = THIS_MODULE,
.act = tcf_ipt,
.dump = tcf_ipt_dump,
- .cleanup = tcf_ipt_cleanup,
+ .cleanup = tcf_ipt_release,
.init = tcf_ipt_init,
};
@@ -311,20 +286,16 @@ MODULE_ALIAS("act_xt");
static int __init ipt_init_module(void)
{
- int ret1, ret2, err;
- err = tcf_hashinfo_init(&ipt_hash_info, IPT_TAB_MASK);
- if (err)
- return err;
+ int ret1, ret2;
- ret1 = tcf_register_action(&act_xt_ops);
+ ret1 = tcf_register_action(&act_xt_ops, IPT_TAB_MASK);
if (ret1 < 0)
printk("Failed to load xt action\n");
- ret2 = tcf_register_action(&act_ipt_ops);
+ ret2 = tcf_register_action(&act_ipt_ops, IPT_TAB_MASK);
if (ret2 < 0)
printk("Failed to load ipt action\n");
if (ret1 < 0 && ret2 < 0) {
- tcf_hashinfo_destroy(&ipt_hash_info);
return ret1;
} else
return 0;
@@ -334,7 +305,6 @@ static void __exit ipt_cleanup_module(void)
{
tcf_unregister_action(&act_xt_ops);
tcf_unregister_action(&act_ipt_ops);
- tcf_hashinfo_destroy(&ipt_hash_info);
}
module_init(ipt_init_module);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 0b2c6d3..4f912c0 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -31,23 +31,13 @@
#define MIRRED_TAB_MASK 7
static LIST_HEAD(mirred_list);
-static struct tcf_hashinfo mirred_hash_info;
-static int tcf_mirred_release(struct tcf_mirred *m, int bind)
+static void tcf_mirred_release(struct tc_action *a, int bind)
{
- if (m) {
- if (bind)
- m->tcf_bindcnt--;
- m->tcf_refcnt--;
- if (!m->tcf_bindcnt && m->tcf_refcnt <= 0) {
- list_del(&m->tcfm_list);
- if (m->tcfm_dev)
- dev_put(m->tcfm_dev);
- tcf_hash_destroy(&m->common, &mirred_hash_info);
- return 1;
- }
- }
- return 0;
+ struct tcf_mirred *m = to_mirred(a);
+ list_del(&m->tcfm_list);
+ if (m->tcfm_dev)
+ dev_put(m->tcfm_dev);
}
static const struct nla_policy mirred_policy[TCA_MIRRED_MAX + 1] = {
@@ -61,7 +51,6 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
struct nlattr *tb[TCA_MIRRED_MAX + 1];
struct tc_mirred *parm;
struct tcf_mirred *m;
- struct tcf_common *pc;
struct net_device *dev;
int ret, ok_push = 0;
@@ -101,21 +90,20 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
dev = NULL;
}
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
+ if (!tcf_hash_check(parm->index, a, bind)) {
if (dev == NULL)
return -EINVAL;
- pc = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*m), bind);
+ if (ret)
+ return ret;
ret = ACT_P_CREATED;
} else {
if (!ovr) {
- tcf_mirred_release(to_mirred(pc), bind);
+ tcf_hash_release(a, bind);
return -EEXIST;
}
}
- m = to_mirred(pc);
+ m = to_mirred(a);
spin_lock_bh(&m->tcf_lock);
m->tcf_action = parm->action;
@@ -131,21 +119,12 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
spin_unlock_bh(&m->tcf_lock);
if (ret == ACT_P_CREATED) {
list_add(&m->tcfm_list, &mirred_list);
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
}
return ret;
}
-static int tcf_mirred_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_mirred *m = a->priv;
-
- if (m)
- return tcf_mirred_release(m, bind);
- return 0;
-}
-
static int tcf_mirred(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
@@ -254,12 +233,11 @@ static struct notifier_block mirred_device_notifier = {
static struct tc_action_ops act_mirred_ops = {
.kind = "mirred",
- .hinfo = &mirred_hash_info,
.type = TCA_ACT_MIRRED,
.owner = THIS_MODULE,
.act = tcf_mirred,
.dump = tcf_mirred_dump,
- .cleanup = tcf_mirred_cleanup,
+ .cleanup = tcf_mirred_release,
.init = tcf_mirred_init,
};
@@ -273,19 +251,13 @@ static int __init mirred_init_module(void)
if (err)
return err;
- err = tcf_hashinfo_init(&mirred_hash_info, MIRRED_TAB_MASK);
- if (err) {
- unregister_netdevice_notifier(&mirred_device_notifier);
- return err;
- }
pr_info("Mirror/redirect action on\n");
- return tcf_register_action(&act_mirred_ops);
+ return tcf_register_action(&act_mirred_ops, MIRRED_TAB_MASK);
}
static void __exit mirred_cleanup_module(void)
{
tcf_unregister_action(&act_mirred_ops);
- tcf_hashinfo_destroy(&mirred_hash_info);
unregister_netdevice_notifier(&mirred_device_notifier);
}
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 81f0404..270a030 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -31,8 +31,6 @@
#define NAT_TAB_MASK 15
-static struct tcf_hashinfo nat_hash_info;
-
static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
[TCA_NAT_PARMS] = { .len = sizeof(struct tc_nat) },
};
@@ -44,7 +42,6 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
struct tc_nat *parm;
int ret = 0, err;
struct tcf_nat *p;
- struct tcf_common *pc;
if (nla == NULL)
return -EINVAL;
@@ -57,20 +54,19 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
return -EINVAL;
parm = nla_data(tb[TCA_NAT_PARMS]);
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
- pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ if (!tcf_hash_check(parm->index, a, bind)) {
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+ if (ret)
+ return ret;
ret = ACT_P_CREATED;
} else {
if (bind)
return 0;
- tcf_hash_release(pc, bind, a->ops->hinfo);
+ tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
}
- p = to_tcf_nat(pc);
+ p = to_tcf_nat(a);
spin_lock_bh(&p->tcf_lock);
p->old_addr = parm->old_addr;
@@ -82,18 +78,11 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
spin_unlock_bh(&p->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
}
-static int tcf_nat_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_nat *p = a->priv;
-
- return tcf_hash_release(&p->common, bind, &nat_hash_info);
-}
-
static int tcf_nat(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
@@ -293,12 +282,10 @@ nla_put_failure:
static struct tc_action_ops act_nat_ops = {
.kind = "nat",
- .hinfo = &nat_hash_info,
.type = TCA_ACT_NAT,
.owner = THIS_MODULE,
.act = tcf_nat,
.dump = tcf_nat_dump,
- .cleanup = tcf_nat_cleanup,
.init = tcf_nat_init,
};
@@ -307,16 +294,12 @@ MODULE_LICENSE("GPL");
static int __init nat_init_module(void)
{
- int err = tcf_hashinfo_init(&nat_hash_info, NAT_TAB_MASK);
- if (err)
- return err;
- return tcf_register_action(&act_nat_ops);
+ return tcf_register_action(&act_nat_ops, NAT_TAB_MASK);
}
static void __exit nat_cleanup_module(void)
{
tcf_unregister_action(&act_nat_ops);
- tcf_hashinfo_destroy(&nat_hash_info);
}
module_init(nat_init_module);
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index be3f0f6..5f9bcb2 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -25,8 +25,6 @@
#define PEDIT_TAB_MASK 15
-static struct tcf_hashinfo pedit_hash_info;
-
static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = {
[TCA_PEDIT_PARMS] = { .len = sizeof(struct tc_pedit) },
};
@@ -39,7 +37,6 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
struct tc_pedit *parm;
int ret = 0, err;
struct tcf_pedit *p;
- struct tcf_common *pc;
struct tc_pedit_key *keys = NULL;
int ksize;
@@ -57,26 +54,22 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
if (nla_len(tb[TCA_PEDIT_PARMS]) < sizeof(*parm) + ksize)
return -EINVAL;
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
+ if (!tcf_hash_check(parm->index, a, bind)) {
if (!parm->nkeys)
return -EINVAL;
- pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
- p = to_pedit(pc);
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*p), bind);
+ if (ret)
+ return ret;
+ p = to_pedit(a);
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL) {
- if (est)
- gen_kill_estimator(&pc->tcfc_bstats,
- &pc->tcfc_rate_est);
- kfree_rcu(pc, tcfc_rcu);
+ tcf_hash_cleanup(a, est);
return -ENOMEM;
}
ret = ACT_P_CREATED;
} else {
- p = to_pedit(pc);
- tcf_hash_release(pc, bind, a->ops->hinfo);
+ p = to_pedit(a);
+ tcf_hash_release(a, bind);
if (bind)
return 0;
if (!ovr)
@@ -100,22 +93,15 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
memcpy(p->tcfp_keys, parm->keys, ksize);
spin_unlock_bh(&p->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
}
-static int tcf_pedit_cleanup(struct tc_action *a, int bind)
+static void tcf_pedit_cleanup(struct tc_action *a, int bind)
{
struct tcf_pedit *p = a->priv;
-
- if (p) {
- struct tc_pedit_key *keys = p->tcfp_keys;
- if (tcf_hash_release(&p->common, bind, &pedit_hash_info)) {
- kfree(keys);
- return 1;
- }
- }
- return 0;
+ struct tc_pedit_key *keys = p->tcfp_keys;
+ kfree(keys);
}
static int tcf_pedit(struct sk_buff *skb, const struct tc_action *a,
@@ -230,7 +216,6 @@ nla_put_failure:
static struct tc_action_ops act_pedit_ops = {
.kind = "pedit",
- .hinfo = &pedit_hash_info,
.type = TCA_ACT_PEDIT,
.owner = THIS_MODULE,
.act = tcf_pedit,
@@ -245,15 +230,11 @@ MODULE_LICENSE("GPL");
static int __init pedit_init_module(void)
{
- int err = tcf_hashinfo_init(&pedit_hash_info, PEDIT_TAB_MASK);
- if (err)
- return err;
- return tcf_register_action(&act_pedit_ops);
+ return tcf_register_action(&act_pedit_ops, PEDIT_TAB_MASK);
}
static void __exit pedit_cleanup_module(void)
{
- tcf_hashinfo_destroy(&pedit_hash_info);
tcf_unregister_action(&act_pedit_ops);
}
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 1778209..0566e46 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -41,7 +41,6 @@ struct tcf_police {
container_of(pc, struct tcf_police, common)
#define POL_TAB_MASK 15
-static struct tcf_hashinfo police_hash_info;
/* old policer structure from before tc actions */
struct tc_police_compat {
@@ -234,7 +233,7 @@ override:
police->tcfp_t_c = ktime_to_ns(ktime_get());
police->tcf_index = parm->index ? parm->index :
- tcf_hash_new_index(a->ops->hinfo);
+ tcf_hash_new_index(hinfo);
h = tcf_hash(police->tcf_index, POL_TAB_MASK);
spin_lock_bh(&hinfo->lock);
hlist_add_head(&police->tcf_head, &hinfo->htab[h]);
@@ -253,14 +252,6 @@ failure:
return err;
}
-static int tcf_act_police_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_police *p = a->priv;
- if (p)
- return tcf_hash_release(&p->common, bind, &police_hash_info);
- return 0;
-}
-
static int tcf_act_police(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
{
@@ -357,12 +348,10 @@ MODULE_LICENSE("GPL");
static struct tc_action_ops act_police_ops = {
.kind = "police",
- .hinfo = &police_hash_info,
.type = TCA_ID_POLICE,
.owner = THIS_MODULE,
.act = tcf_act_police,
.dump = tcf_act_police_dump,
- .cleanup = tcf_act_police_cleanup,
.init = tcf_act_police_locate,
.walk = tcf_act_police_walker
};
@@ -370,19 +359,12 @@ static struct tc_action_ops act_police_ops = {
static int __init
police_init_module(void)
{
- int err = tcf_hashinfo_init(&police_hash_info, POL_TAB_MASK);
- if (err)
- return err;
- err = tcf_register_action(&act_police_ops);
- if (err)
- tcf_hashinfo_destroy(&police_hash_info);
- return err;
+ return tcf_register_action(&act_police_ops, POL_TAB_MASK);
}
static void __exit
police_cleanup_module(void)
{
- tcf_hashinfo_destroy(&police_hash_info);
tcf_unregister_action(&act_police_ops);
}
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 8ef2f1f..992c231 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -25,7 +25,6 @@
#include <net/tc_act/tc_defact.h>
#define SIMP_TAB_MASK 7
-static struct tcf_hashinfo simp_hash_info;
#define SIMP_MAX_DATA 32
static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
@@ -47,20 +46,10 @@ static int tcf_simp(struct sk_buff *skb, const struct tc_action *a,
return d->tcf_action;
}
-static int tcf_simp_release(struct tcf_defact *d, int bind)
+static void tcf_simp_release(struct tc_action *a, int bind)
{
- int ret = 0;
- if (d) {
- if (bind)
- d->tcf_bindcnt--;
- d->tcf_refcnt--;
- if (d->tcf_bindcnt <= 0 && d->tcf_refcnt <= 0) {
- kfree(d->tcfd_defdata);
- tcf_hash_destroy(&d->common, &simp_hash_info);
- ret = 1;
- }
- }
- return ret;
+ struct tcf_defact *d = to_defact(a);
+ kfree(d->tcfd_defdata);
}
static int alloc_defdata(struct tcf_defact *d, char *defdata)
@@ -94,7 +83,6 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
struct nlattr *tb[TCA_DEF_MAX + 1];
struct tc_defact *parm;
struct tcf_defact *d;
- struct tcf_common *pc;
char *defdata;
int ret = 0, err;
@@ -114,29 +102,25 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_DEF_PARMS]);
defdata = nla_data(tb[TCA_DEF_DATA]);
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
- pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ if (!tcf_hash_check(parm->index, a, bind)) {
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+ if (ret)
+ return ret;
- d = to_defact(pc);
+ d = to_defact(a);
ret = alloc_defdata(d, defdata);
if (ret < 0) {
- if (est)
- gen_kill_estimator(&pc->tcfc_bstats,
- &pc->tcfc_rate_est);
- kfree_rcu(pc, tcfc_rcu);
+ tcf_hash_cleanup(a, est);
return ret;
}
d->tcf_action = parm->action;
ret = ACT_P_CREATED;
} else {
- d = to_defact(pc);
+ d = to_defact(a);
if (bind)
return 0;
- tcf_simp_release(d, bind);
+ tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
@@ -144,19 +128,10 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
}
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
}
-static int tcf_simp_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_defact *d = a->priv;
-
- if (d)
- return tcf_simp_release(d, bind);
- return 0;
-}
-
static int tcf_simp_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
@@ -187,12 +162,11 @@ nla_put_failure:
static struct tc_action_ops act_simp_ops = {
.kind = "simple",
- .hinfo = &simp_hash_info,
.type = TCA_ACT_SIMP,
.owner = THIS_MODULE,
.act = tcf_simp,
.dump = tcf_simp_dump,
- .cleanup = tcf_simp_cleanup,
+ .cleanup = tcf_simp_release,
.init = tcf_simp_init,
};
@@ -202,23 +176,15 @@ MODULE_LICENSE("GPL");
static int __init simp_init_module(void)
{
- int err, ret;
- err = tcf_hashinfo_init(&simp_hash_info, SIMP_TAB_MASK);
- if (err)
- return err;
-
- ret = tcf_register_action(&act_simp_ops);
+ int ret;
+ ret = tcf_register_action(&act_simp_ops, SIMP_TAB_MASK);
if (!ret)
pr_info("Simple TC action Loaded\n");
- else
- tcf_hashinfo_destroy(&simp_hash_info);
-
return ret;
}
static void __exit simp_cleanup_module(void)
{
- tcf_hashinfo_destroy(&simp_hash_info);
tcf_unregister_action(&act_simp_ops);
}
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 9872508..fcfeeaf 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -28,7 +28,6 @@
#include <net/tc_act/tc_skbedit.h>
#define SKBEDIT_TAB_MASK 15
-static struct tcf_hashinfo skbedit_hash_info;
static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
struct tcf_result *res)
@@ -65,7 +64,6 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
struct tc_skbedit *parm;
struct tcf_skbedit *d;
- struct tcf_common *pc;
u32 flags = 0, *priority = NULL, *mark = NULL;
u16 *queue_mapping = NULL;
int ret = 0, err;
@@ -100,19 +98,18 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
parm = nla_data(tb[TCA_SKBEDIT_PARMS]);
- pc = tcf_hash_check(parm->index, a, bind);
- if (!pc) {
- pc = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
- if (IS_ERR(pc))
- return PTR_ERR(pc);
+ if (!tcf_hash_check(parm->index, a, bind)) {
+ ret = tcf_hash_create(parm->index, est, a, sizeof(*d), bind);
+ if (ret)
+ return ret;
- d = to_skbedit(pc);
+ d = to_skbedit(a);
ret = ACT_P_CREATED;
} else {
- d = to_skbedit(pc);
+ d = to_skbedit(a);
if (bind)
return 0;
- tcf_hash_release(pc, bind, a->ops->hinfo);
+ tcf_hash_release(a, bind);
if (!ovr)
return -EEXIST;
}
@@ -132,19 +129,10 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
spin_unlock_bh(&d->tcf_lock);
if (ret == ACT_P_CREATED)
- tcf_hash_insert(pc, a->ops->hinfo);
+ tcf_hash_insert(a);
return ret;
}
-static int tcf_skbedit_cleanup(struct tc_action *a, int bind)
-{
- struct tcf_skbedit *d = a->priv;
-
- if (d)
- return tcf_hash_release(&d->common, bind, &skbedit_hash_info);
- return 0;
-}
-
static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
int bind, int ref)
{
@@ -186,12 +174,10 @@ nla_put_failure:
static struct tc_action_ops act_skbedit_ops = {
.kind = "skbedit",
- .hinfo = &skbedit_hash_info,
.type = TCA_ACT_SKBEDIT,
.owner = THIS_MODULE,
.act = tcf_skbedit,
.dump = tcf_skbedit_dump,
- .cleanup = tcf_skbedit_cleanup,
.init = tcf_skbedit_init,
};
@@ -201,15 +187,11 @@ MODULE_LICENSE("GPL");
static int __init skbedit_init_module(void)
{
- int err = tcf_hashinfo_init(&skbedit_hash_info, SKBEDIT_TAB_MASK);
- if (err)
- return err;
- return tcf_register_action(&act_skbedit_ops);
+ return tcf_register_action(&act_skbedit_ops, SKBEDIT_TAB_MASK);
}
static void __exit skbedit_cleanup_module(void)
{
- tcf_hashinfo_destroy(&skbedit_hash_info);
tcf_unregister_action(&act_skbedit_ops);
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index de1059a..f1669a00 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -117,6 +117,11 @@ struct netem_sched_data {
LOST_IN_BURST_PERIOD,
} _4_state_model;
+ enum {
+ GOOD_STATE = 1,
+ BAD_STATE,
+ } GE_state_model;
+
/* Correlated Loss Generation models */
struct clgstate {
/* state of the Markov chain */
@@ -272,15 +277,15 @@ static bool loss_gilb_ell(struct netem_sched_data *q)
struct clgstate *clg = &q->clg;
switch (clg->state) {
- case 1:
+ case GOOD_STATE:
if (prandom_u32() < clg->a1)
- clg->state = 2;
+ clg->state = BAD_STATE;
if (prandom_u32() < clg->a4)
return true;
break;
- case 2:
+ case BAD_STATE:
if (prandom_u32() < clg->a2)
- clg->state = 1;
+ clg->state = GOOD_STATE;
if (prandom_u32() > clg->a3)
return true;
}
@@ -689,9 +694,8 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
return 0;
}
-static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
+static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
- struct netem_sched_data *q = qdisc_priv(sch);
const struct tc_netem_corr *c = nla_data(attr);
init_crandom(&q->delay_cor, c->delay_corr);
@@ -699,27 +703,24 @@ static void get_correlation(struct Qdisc *sch, const struct nlattr *attr)
init_crandom(&q->dup_cor, c->dup_corr);
}
-static void get_reorder(struct Qdisc *sch, const struct nlattr *attr)
+static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
- struct netem_sched_data *q = qdisc_priv(sch);
const struct tc_netem_reorder *r = nla_data(attr);
q->reorder = r->probability;
init_crandom(&q->reorder_cor, r->correlation);
}
-static void get_corrupt(struct Qdisc *sch, const struct nlattr *attr)
+static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
- struct netem_sched_data *q = qdisc_priv(sch);
const struct tc_netem_corrupt *r = nla_data(attr);
q->corrupt = r->probability;
init_crandom(&q->corrupt_cor, r->correlation);
}
-static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
+static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
- struct netem_sched_data *q = qdisc_priv(sch);
const struct tc_netem_rate *r = nla_data(attr);
q->rate = r->rate;
@@ -732,9 +733,8 @@ static void get_rate(struct Qdisc *sch, const struct nlattr *attr)
q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
-static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
+static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
- struct netem_sched_data *q = qdisc_priv(sch);
const struct nlattr *la;
int rem;
@@ -752,7 +752,7 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
q->loss_model = CLG_4_STATES;
- q->clg.state = 1;
+ q->clg.state = TX_IN_GAP_PERIOD;
q->clg.a1 = gi->p13;
q->clg.a2 = gi->p31;
q->clg.a3 = gi->p32;
@@ -770,7 +770,7 @@ static int get_loss_clg(struct Qdisc *sch, const struct nlattr *attr)
}
q->loss_model = CLG_GILB_ELL;
- q->clg.state = 1;
+ q->clg.state = GOOD_STATE;
q->clg.a1 = ge->p;
q->clg.a2 = ge->r;
q->clg.a3 = ge->h;
@@ -821,6 +821,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
struct netem_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_NETEM_MAX + 1];
struct tc_netem_qopt *qopt;
+ struct clgstate old_clg;
+ int old_loss_model = CLG_RANDOM;
int ret;
if (opt == NULL)
@@ -831,6 +833,33 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
if (ret < 0)
return ret;
+ /* backup q->clg and q->loss_model */
+ old_clg = q->clg;
+ old_loss_model = q->loss_model;
+
+ if (tb[TCA_NETEM_LOSS]) {
+ ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
+ if (ret) {
+ q->loss_model = old_loss_model;
+ return ret;
+ }
+ } else {
+ q->loss_model = CLG_RANDOM;
+ }
+
+ if (tb[TCA_NETEM_DELAY_DIST]) {
+ ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
+ if (ret) {
+ /* recover clg and loss_model, in case
+ * q->clg and q->loss_model were modified
+ * in get_loss_clg()
+ */
+ q->clg = old_clg;
+ q->loss_model = old_loss_model;
+ return ret;
+ }
+ }
+
sch->limit = qopt->limit;
q->latency = qopt->latency;
@@ -848,22 +877,16 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
q->reorder = ~0;
if (tb[TCA_NETEM_CORR])
- get_correlation(sch, tb[TCA_NETEM_CORR]);
-
- if (tb[TCA_NETEM_DELAY_DIST]) {
- ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST]);
- if (ret)
- return ret;
- }
+ get_correlation(q, tb[TCA_NETEM_CORR]);
if (tb[TCA_NETEM_REORDER])
- get_reorder(sch, tb[TCA_NETEM_REORDER]);
+ get_reorder(q, tb[TCA_NETEM_REORDER]);
if (tb[TCA_NETEM_CORRUPT])
- get_corrupt(sch, tb[TCA_NETEM_CORRUPT]);
+ get_corrupt(q, tb[TCA_NETEM_CORRUPT]);
if (tb[TCA_NETEM_RATE])
- get_rate(sch, tb[TCA_NETEM_RATE]);
+ get_rate(q, tb[TCA_NETEM_RATE]);
if (tb[TCA_NETEM_RATE64])
q->rate = max_t(u64, q->rate,
@@ -872,10 +895,6 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
if (tb[TCA_NETEM_ECN])
q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
- q->loss_model = CLG_RANDOM;
- if (tb[TCA_NETEM_LOSS])
- ret = get_loss_clg(sch, tb[TCA_NETEM_LOSS]);
-
return ret;
}
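
For context on the netem hunks: the Gilbert-Elliott generator that gains named states above is small enough to show in one piece. The following is a restatement of loss_gilb_ell() as it reads after this change, with comments spelling out the a1..a3 parameters as they are assigned in get_loss_clg(); the closing return false is assumed from the surrounding code, the rest mirrors the hunk.

static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)	/* a1 = ge->p: move to BAD_STATE */
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)	/* loss while in GOOD_STATE */
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)	/* a2 = ge->r: back to GOOD_STATE */
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)	/* a3 = ge->h: loss while in BAD_STATE */
			return true;
	}

	return false;
}
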
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 4f505a0..87a02bf 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -101,12 +101,11 @@
struct tbf_sched_data {
/* Parameters */
u32 limit; /* Maximal length of backlog: bytes */
+ u32 max_size;
s64 buffer; /* Token bucket depth/rate: MUST BE >= MTU/B */
s64 mtu;
- u32 max_size;
struct psched_ratecfg rate;
struct psched_ratecfg peak;
- bool peak_present;
/* Variables */
s64 tokens; /* Current number of B tokens */
@@ -222,6 +221,11 @@ static unsigned int tbf_drop(struct Qdisc *sch)
return len;
}
+static bool tbf_peak_present(const struct tbf_sched_data *q)
+{
+ return q->peak.rate_bytes_ps;
+}
+
static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
{
struct tbf_sched_data *q = qdisc_priv(sch);
@@ -238,7 +242,7 @@ static struct sk_buff *tbf_dequeue(struct Qdisc *sch)
now = ktime_to_ns(ktime_get());
toks = min_t(s64, now - q->t_c, q->buffer);
- if (q->peak_present) {
+ if (tbf_peak_present(q)) {
ptoks = toks + q->ptokens;
if (ptoks > q->mtu)
ptoks = q->mtu;
@@ -366,6 +370,8 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
} else {
max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
}
+ } else {
+ memset(&peak, 0, sizeof(peak));
}
if (max_size < psched_mtu(qdisc_dev(sch)))
@@ -410,12 +416,7 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
q->ptokens = q->mtu;
memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
- if (qopt->peakrate.rate) {
- memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
- q->peak_present = true;
- } else {
- q->peak_present = false;
- }
+ memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
sch_tree_unlock(sch);
err = 0;
@@ -458,7 +459,7 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
opt.limit = q->limit;
psched_ratecfg_getrate(&opt.rate, &q->rate);
- if (q->peak_present)
+ if (tbf_peak_present(q))
psched_ratecfg_getrate(&opt.peakrate, &q->peak);
else
memset(&opt.peakrate, 0, sizeof(opt.peakrate));
@@ -469,7 +470,7 @@ static int tbf_dump(struct Qdisc *sch, struct sk_buff *skb)
if (q->rate.rate_bytes_ps >= (1ULL << 32) &&
nla_put_u64(skb, TCA_TBF_RATE64, q->rate.rate_bytes_ps))
goto nla_put_failure;
- if (q->peak_present &&
+ if (tbf_peak_present(q) &&
q->peak.rate_bytes_ps >= (1ULL << 32) &&
nla_put_u64(skb, TCA_TBF_PRATE64, q->peak.rate_bytes_ps))
goto nla_put_failure;
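
The sch_tbf change replaces the cached peak_present bool with a predicate derived from the configuration itself, so the flag can never drift out of sync with q->peak. A minimal sketch of the idea, mirroring the helper added above:

/* Peak-rate presence is a property of the configured rate: tbf_change()
 * zeroes 'peak' when no peak rate attribute is supplied, so a zero
 * rate_bytes_ps simply means "no peak bucket".
 */
static bool tbf_peak_present(const struct tbf_sched_data *q)
{
	return q->peak.rate_bytes_ps;
}

Dequeue and dump then call tbf_peak_present(q) wherever they previously tested q->peak_present.
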
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index d0810dc..1d348d1 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -652,5 +652,4 @@ void sctp_transport_immediate_rtx(struct sctp_transport *t)
if (!mod_timer(&t->T3_rtx_timer, jiffies + t->rto))
sctp_transport_hold(t);
}
- return;
}
diff --git a/net/socket.c b/net/socket.c
index 879933a..840cffb 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -593,7 +593,7 @@ void sock_release(struct socket *sock)
}
if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
- printk(KERN_ERR "sock_release: fasync list not empty!\n");
+ pr_err("%s: fasync list not empty!\n", __func__);
if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags))
return;
@@ -1265,8 +1265,8 @@ int __sock_create(struct net *net, int family, int type, int protocol,
static int warned;
if (!warned) {
warned = 1;
- printk(KERN_INFO "%s uses obsolete (PF_INET,SOCK_PACKET)\n",
- current->comm);
+ pr_info("%s uses obsolete (PF_INET,SOCK_PACKET)\n",
+ current->comm);
}
family = PF_PACKET;
}
@@ -2595,8 +2595,7 @@ int sock_register(const struct net_proto_family *ops)
int err;
if (ops->family >= NPROTO) {
- printk(KERN_CRIT "protocol %d >= NPROTO(%d)\n", ops->family,
- NPROTO);
+ pr_crit("protocol %d >= NPROTO(%d)\n", ops->family, NPROTO);
return -ENOBUFS;
}
@@ -2610,7 +2609,7 @@ int sock_register(const struct net_proto_family *ops)
}
spin_unlock(&net_family_lock);
- printk(KERN_INFO "NET: Registered protocol family %d\n", ops->family);
+ pr_info("NET: Registered protocol family %d\n", ops->family);
return err;
}
EXPORT_SYMBOL(sock_register);
@@ -2638,7 +2637,7 @@ void sock_unregister(int family)
synchronize_rcu();
- printk(KERN_INFO "NET: Unregistered protocol family %d\n", family);
+ pr_info("NET: Unregistered protocol family %d\n", family);
}
EXPORT_SYMBOL(sock_unregister);
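
The socket.c hunks are a mechanical switch from printk(KERN_LEVEL ...) to the pr_*() helpers. For reference only (simplified from include/linux/printk.h, not part of this patch), the helpers expand as sketched below, which is why the converted call sites behave identically while also picking up any per-file pr_fmt() prefix that may be defined later:

#ifndef pr_fmt
#define pr_fmt(fmt) fmt			/* files may override this with a prefix */
#endif

#define pr_crit(fmt, ...) printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...)  printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__)

/* e.g. the converted message in sock_release(): */
pr_err("%s: fasync list not empty!\n", __func__);
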
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index 60b00ab..a74acf9 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -37,6 +37,8 @@
#ifndef _TIPC_ADDR_H
#define _TIPC_ADDR_H
+#include "core.h"
+
#define TIPC_ZONE_MASK 0xff000000u
#define TIPC_CLUSTER_MASK 0xfffff000u
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index bf860d9..e0feb7e 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -356,9 +356,9 @@ static void bclink_peek_nack(struct tipc_msg *msg)
}
/*
- * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
+ * tipc_bclink_xmit - broadcast a packet to all nodes in cluster
*/
-int tipc_bclink_send_msg(struct sk_buff *buf)
+int tipc_bclink_xmit(struct sk_buff *buf)
{
int res;
@@ -370,7 +370,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
goto exit;
}
- res = tipc_link_send_buf(bcl, buf);
+ res = __tipc_link_xmit(bcl, buf);
if (likely(res >= 0)) {
bclink_set_last_sent();
bcl->stats.queue_sz_counts++;
@@ -399,19 +399,18 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
*/
if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
- tipc_link_send_proto_msg(
- node->active_links[node->addr & 1],
- STATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(node->active_links[node->addr & 1],
+ STATE_MSG, 0, 0, 0, 0, 0);
bcl->stats.sent_acks++;
}
}
/**
- * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
+ * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
*
* tipc_net_lock is read_locked, no other locks set
*/
-void tipc_bclink_recv_pkt(struct sk_buff *buf)
+void tipc_bclink_rcv(struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
struct tipc_node *node;
@@ -468,7 +467,7 @@ receive:
spin_unlock_bh(&bc_lock);
tipc_node_unlock(node);
if (likely(msg_mcast(msg)))
- tipc_port_recv_mcast(buf, NULL);
+ tipc_port_mcast_rcv(buf, NULL);
else
kfree_skb(buf);
} else if (msg_user(msg) == MSG_BUNDLER) {
@@ -478,12 +477,12 @@ receive:
bcl->stats.recv_bundled += msg_msgcnt(msg);
spin_unlock_bh(&bc_lock);
tipc_node_unlock(node);
- tipc_link_recv_bundle(buf);
+ tipc_link_bundle_rcv(buf);
} else if (msg_user(msg) == MSG_FRAGMENTER) {
int ret;
- ret = tipc_link_recv_fragment(&node->bclink.reasm_head,
- &node->bclink.reasm_tail,
- &buf);
+ ret = tipc_link_frag_rcv(&node->bclink.reasm_head,
+ &node->bclink.reasm_tail,
+ &buf);
if (ret == LINK_REASM_ERROR)
goto unlock;
spin_lock_bh(&bc_lock);
@@ -503,7 +502,7 @@ receive:
bclink_accept_pkt(node, seqno);
spin_unlock_bh(&bc_lock);
tipc_node_unlock(node);
- tipc_named_recv(buf);
+ tipc_named_rcv(buf);
} else {
spin_lock_bh(&bc_lock);
bclink_accept_pkt(node, seqno);
@@ -785,7 +784,6 @@ void tipc_bclink_init(void)
bcl->owner = &bclink->node;
bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
- spin_lock_init(&bcbearer->bearer.lock);
bcl->b_ptr = &bcbearer->bearer;
bcl->state = WORKING_WORKING;
strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 6ee587b..a80ef54 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -90,8 +90,8 @@ void tipc_bclink_add_node(u32 addr);
void tipc_bclink_remove_node(u32 addr);
struct tipc_node *tipc_bclink_retransmit_to(void);
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
-int tipc_bclink_send_msg(struct sk_buff *buf);
-void tipc_bclink_recv_pkt(struct sk_buff *buf);
+int tipc_bclink_xmit(struct sk_buff *buf);
+void tipc_bclink_rcv(struct sk_buff *buf);
u32 tipc_bclink_get_last_sent(void);
u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr);
void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 574b861..89c4c2d 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -51,7 +51,7 @@ static struct tipc_media * const media_info_array[] = {
struct tipc_bearer tipc_bearers[MAX_BEARERS];
-static void bearer_disable(struct tipc_bearer *b_ptr);
+static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
/**
* tipc_media_find - locates specified media object by name
@@ -327,12 +327,10 @@ restart:
b_ptr->net_plane = bearer_id + 'A';
b_ptr->active = 1;
b_ptr->priority = priority;
- INIT_LIST_HEAD(&b_ptr->links);
- spin_lock_init(&b_ptr->lock);
res = tipc_disc_create(b_ptr, &b_ptr->bcast_addr, disc_domain);
if (res) {
- bearer_disable(b_ptr);
+ bearer_disable(b_ptr, false);
pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
name);
goto exit;
@@ -350,20 +348,9 @@ exit:
*/
static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
{
- struct tipc_link *l_ptr;
- struct tipc_link *temp_l_ptr;
-
read_lock_bh(&tipc_net_lock);
pr_info("Resetting bearer <%s>\n", b_ptr->name);
- spin_lock_bh(&b_ptr->lock);
- list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
- struct tipc_node *n_ptr = l_ptr->owner;
-
- spin_lock_bh(&n_ptr->lock);
- tipc_link_reset(l_ptr);
- spin_unlock_bh(&n_ptr->lock);
- }
- spin_unlock_bh(&b_ptr->lock);
+ tipc_link_reset_list(b_ptr->identity);
read_unlock_bh(&tipc_net_lock);
return 0;
}
@@ -373,25 +360,14 @@ static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
*
* Note: This routine assumes caller holds tipc_net_lock.
*/
-static void bearer_disable(struct tipc_bearer *b_ptr)
+static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
{
- struct tipc_link *l_ptr;
- struct tipc_link *temp_l_ptr;
- struct tipc_link_req *temp_req;
-
pr_info("Disabling bearer <%s>\n", b_ptr->name);
- spin_lock_bh(&b_ptr->lock);
b_ptr->media->disable_media(b_ptr);
- list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
- tipc_link_delete(l_ptr);
- }
- temp_req = b_ptr->link_req;
- b_ptr->link_req = NULL;
- spin_unlock_bh(&b_ptr->lock);
-
- if (temp_req)
- tipc_disc_delete(temp_req);
+ tipc_link_delete_list(b_ptr->identity, shutting_down);
+ if (b_ptr->link_req)
+ tipc_disc_delete(b_ptr->link_req);
memset(b_ptr, 0, sizeof(struct tipc_bearer));
}
@@ -406,7 +382,7 @@ int tipc_disable_bearer(const char *name)
pr_warn("Attempt to disable unknown bearer <%s>\n", name);
res = -EINVAL;
} else {
- bearer_disable(b_ptr);
+ bearer_disable(b_ptr, false);
res = 0;
}
write_unlock_bh(&tipc_net_lock);
@@ -631,6 +607,6 @@ void tipc_bearer_stop(void)
for (i = 0; i < MAX_BEARERS; i++) {
if (tipc_bearers[i].active)
- bearer_disable(&tipc_bearers[i]);
+ bearer_disable(&tipc_bearers[i], true);
}
}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 4f5db9a..425dd81 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -107,10 +107,8 @@ struct tipc_media {
/**
* struct tipc_bearer - Generic TIPC bearer structure
- * @dev: ptr to associated network device
- * @usr_handle: pointer to additional media-specific information about bearer
+ * @media_ptr: pointer to additional media-specific information about bearer
* @mtu: max packet size bearer can support
- * @lock: spinlock for controlling access to bearer
* @addr: media-specific address associated with bearer
* @name: bearer name (format = media:interface)
* @media: ptr to media structure associated with bearer
@@ -120,7 +118,6 @@ struct tipc_media {
* @tolerance: default link tolerance for bearer
* @identity: array index of this bearer within TIPC bearer array
* @link_req: ptr to (optional) structure making periodic link setup requests
- * @links: list of non-congested links associated with bearer
* @active: non-zero if bearer structure is represents a bearer
* @net_plane: network plane ('A' through 'H') currently associated with bearer
* @nodes: indicates which nodes in cluster can be reached through bearer
@@ -134,7 +131,6 @@ struct tipc_bearer {
u32 mtu; /* initialized by media */
struct tipc_media_addr addr; /* initialized by media */
char name[TIPC_MAX_BEARER_NAME];
- spinlock_t lock;
struct tipc_media *media;
struct tipc_media_addr bcast_addr;
u32 priority;
@@ -142,7 +138,6 @@ struct tipc_bearer {
u32 tolerance;
u32 identity;
struct tipc_link_req *link_req;
- struct list_head links;
int active;
char net_plane;
struct tipc_node_map nodes;
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 80c2064..e2491b3 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -1,7 +1,7 @@
/*
* net/tipc/core.c: TIPC module code
*
- * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2003-2006, 2013, Ericsson AB
* Copyright (c) 2005-2006, 2010-2013, Wind River Systems
* All rights reserved.
*
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 412ff41..fa94da6 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -110,11 +110,11 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
}
/**
- * tipc_disc_recv_msg - handle incoming link setup message (request or response)
+ * tipc_disc_rcv - handle incoming link setup message (request or response)
* @buf: buffer containing message
* @b_ptr: bearer that message arrived on
*/
-void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
+void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr)
{
struct tipc_node *n_ptr;
struct tipc_link *link;
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index 75b67c4..b4fc962 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -44,6 +44,6 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest,
void tipc_disc_delete(struct tipc_link_req *req);
void tipc_disc_add_dest(struct tipc_link_req *req);
void tipc_disc_remove_dest(struct tipc_link_req *req);
-void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr);
+void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr);
#endif
diff --git a/net/tipc/link.c b/net/tipc/link.c
index da6018b..d1a764b 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -77,19 +77,19 @@ static const char *link_unk_evt = "Unknown link event ";
static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
struct sk_buff *buf);
-static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf);
-static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
+static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
+static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
struct sk_buff **buf);
static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
-static int link_send_sections_long(struct tipc_port *sender,
- struct iovec const *msg_sect,
- unsigned int len, u32 destnode);
+static int tipc_link_iovec_long_xmit(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len, u32 destnode);
static void link_state_event(struct tipc_link *l_ptr, u32 event);
static void link_reset_statistics(struct tipc_link *l_ptr);
static void link_print(struct tipc_link *l_ptr, const char *str);
-static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
-static void tipc_link_send_sync(struct tipc_link *l);
-static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf);
+static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
+static void tipc_link_sync_xmit(struct tipc_link *l);
+static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
/*
* Simple link routines
@@ -147,11 +147,6 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
/**
* link_timeout - handle expiration of link timer
* @l_ptr: pointer to link
- *
- * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
- * with tipc_link_delete(). (There is no risk that the node will be deleted by
- * another thread because tipc_link_delete() always cancels the link timer before
- * tipc_node_delete() is called.)
*/
static void link_timeout(struct tipc_link *l_ptr)
{
@@ -213,8 +208,8 @@ static void link_set_timer(struct tipc_link *l_ptr, u32 time)
* Returns pointer to link.
*/
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
- struct tipc_bearer *b_ptr,
- const struct tipc_media_addr *media_addr)
+ struct tipc_bearer *b_ptr,
+ const struct tipc_media_addr *media_addr)
{
struct tipc_link *l_ptr;
struct tipc_msg *msg;
@@ -279,41 +274,43 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
k_init_timer(&l_ptr->timer, (Handler)link_timeout,
(unsigned long)l_ptr);
- list_add_tail(&l_ptr->link_list, &b_ptr->links);
link_state_event(l_ptr, STARTING_EVT);
return l_ptr;
}
-/**
- * tipc_link_delete - delete a link
- * @l_ptr: pointer to link
- *
- * Note: 'tipc_net_lock' is write_locked, bearer is locked.
- * This routine must not grab the node lock until after link timer cancellation
- * to avoid a potential deadlock situation.
- */
-void tipc_link_delete(struct tipc_link *l_ptr)
-{
- if (!l_ptr) {
- pr_err("Attempt to delete non-existent link\n");
- return;
- }
- k_cancel_timer(&l_ptr->timer);
+void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
+{
+ struct tipc_link *l_ptr;
+ struct tipc_node *n_ptr;
- tipc_node_lock(l_ptr->owner);
- tipc_link_reset(l_ptr);
- tipc_node_detach_link(l_ptr->owner, l_ptr);
- tipc_link_purge_queues(l_ptr);
- list_del_init(&l_ptr->link_list);
- tipc_node_unlock(l_ptr->owner);
- k_term_timer(&l_ptr->timer);
- kfree(l_ptr);
+ list_for_each_entry(n_ptr, &tipc_node_list, list) {
+ spin_lock_bh(&n_ptr->lock);
+ l_ptr = n_ptr->links[bearer_id];
+ if (l_ptr) {
+ tipc_link_reset(l_ptr);
+ if (shutting_down || !tipc_node_is_up(n_ptr)) {
+ tipc_node_detach_link(l_ptr->owner, l_ptr);
+ tipc_link_reset_fragments(l_ptr);
+ spin_unlock_bh(&n_ptr->lock);
+
+ /* Nobody else can access this link now: */
+ del_timer_sync(&l_ptr->timer);
+ kfree(l_ptr);
+ } else {
+ /* Detach/delete when failover is finished: */
+ l_ptr->flags |= LINK_STOPPED;
+ spin_unlock_bh(&n_ptr->lock);
+ del_timer_sync(&l_ptr->timer);
+ }
+ continue;
+ }
+ spin_unlock_bh(&n_ptr->lock);
+ }
}
-
/**
* link_schedule_port - schedule port for deferred sending
* @l_ptr: pointer to link
@@ -461,6 +458,19 @@ void tipc_link_reset(struct tipc_link *l_ptr)
link_reset_statistics(l_ptr);
}
+void tipc_link_reset_list(unsigned int bearer_id)
+{
+ struct tipc_link *l_ptr;
+ struct tipc_node *n_ptr;
+
+ list_for_each_entry(n_ptr, &tipc_node_list, list) {
+ spin_lock_bh(&n_ptr->lock);
+ l_ptr = n_ptr->links[bearer_id];
+ if (l_ptr)
+ tipc_link_reset(l_ptr);
+ spin_unlock_bh(&n_ptr->lock);
+ }
+}
static void link_activate(struct tipc_link *l_ptr)
{
@@ -479,7 +489,10 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
struct tipc_link *other;
u32 cont_intv = l_ptr->continuity_interval;
- if (!l_ptr->started && (event != STARTING_EVT))
+ if (l_ptr->flags & LINK_STOPPED)
+ return;
+
+ if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
return; /* Not yet. */
/* Check whether changeover is going on */
@@ -499,12 +512,12 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
if (l_ptr->next_in_no != l_ptr->checkpoint) {
l_ptr->checkpoint = l_ptr->next_in_no;
if (tipc_bclink_acks_missing(l_ptr->owner)) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG,
+ 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
}
link_set_timer(l_ptr, cont_intv);
@@ -512,7 +525,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
}
l_ptr->state = WORKING_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv / 4);
break;
@@ -522,7 +535,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
@@ -544,7 +558,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
tipc_link_reset(l_ptr);
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
@@ -554,14 +569,14 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->fsm_msg_cnt = 0;
l_ptr->checkpoint = l_ptr->next_in_no;
if (tipc_bclink_acks_missing(l_ptr->owner)) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
}
link_set_timer(l_ptr, cont_intv);
} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG,
+ 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv / 4);
} else { /* Link has failed */
@@ -570,8 +585,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
tipc_link_reset(l_ptr);
l_ptr->state = RESET_UNKNOWN;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, RESET_MSG,
- 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, RESET_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
}
@@ -591,24 +606,25 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
link_activate(l_ptr);
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
if (l_ptr->owner->working_links == 1)
- tipc_link_send_sync(l_ptr);
+ tipc_link_sync_xmit(l_ptr);
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
l_ptr->state = RESET_RESET;
l_ptr->fsm_msg_cnt = 0;
- tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+ 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
case STARTING_EVT:
- l_ptr->started = 1;
+ l_ptr->flags |= LINK_STARTED;
/* fall through */
case TIMEOUT_EVT:
- tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
@@ -626,16 +642,17 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
l_ptr->state = WORKING_WORKING;
l_ptr->fsm_msg_cnt = 0;
link_activate(l_ptr);
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
if (l_ptr->owner->working_links == 1)
- tipc_link_send_sync(l_ptr);
+ tipc_link_sync_xmit(l_ptr);
link_set_timer(l_ptr, cont_intv);
break;
case RESET_MSG:
break;
case TIMEOUT_EVT:
- tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
+ 0, 0, 0, 0, 0);
l_ptr->fsm_msg_cnt++;
link_set_timer(l_ptr, cont_intv);
break;
@@ -721,11 +738,11 @@ static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
}
/*
- * tipc_link_send_buf() is the 'full path' for messages, called from
- * inside TIPC when the 'fast path' in tipc_send_buf
+ * tipc_link_xmit() is the 'full path' for messages, called from
+ * inside TIPC when the 'fast path' in tipc_send_xmit
* has failed, and from link_send()
*/
-int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
+int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
u32 size = msg_size(msg);
@@ -753,7 +770,7 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
/* Fragmentation needed ? */
if (size > max_packet)
- return link_send_long_buf(l_ptr, buf);
+ return tipc_link_frag_xmit(l_ptr, buf);
/* Packet can be queued or sent. */
if (likely(!link_congested(l_ptr))) {
@@ -797,11 +814,11 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
}
/*
- * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
- * not been selected yet, and the the owner node is not locked
+ * tipc_link_xmit(): same as __tipc_link_xmit(), but the link to use
+ * has not been selected yet, and the owner node is not locked
* Called by TIPC internal users, e.g. the name distributor
*/
-int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
+int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
{
struct tipc_link *l_ptr;
struct tipc_node *n_ptr;
@@ -813,7 +830,7 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
tipc_node_lock(n_ptr);
l_ptr = n_ptr->active_links[selector & 1];
if (l_ptr)
- res = tipc_link_send_buf(l_ptr, buf);
+ res = __tipc_link_xmit(l_ptr, buf);
else
kfree_skb(buf);
tipc_node_unlock(n_ptr);
@@ -825,14 +842,14 @@ int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
}
/*
- * tipc_link_send_sync - synchronize broadcast link endpoints.
+ * tipc_link_sync_xmit - synchronize broadcast link endpoints.
*
* Give a newly added peer node the sequence number where it should
* start receiving and acking broadcast packets.
*
* Called with node locked
*/
-static void tipc_link_send_sync(struct tipc_link *l)
+static void tipc_link_sync_xmit(struct tipc_link *l)
{
struct sk_buff *buf;
struct tipc_msg *msg;
@@ -849,14 +866,14 @@ static void tipc_link_send_sync(struct tipc_link *l)
}
/*
- * tipc_link_recv_sync - synchronize broadcast link endpoints.
+ * tipc_link_sync_rcv - synchronize broadcast link endpoints.
* Receive the sequence number where we should start receiving and
* acking broadcast packets from a newly added peer node, and open
* up for reception of such packets.
*
* Called with node locked
*/
-static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
+static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
@@ -866,7 +883,7 @@ static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
}
/*
- * tipc_link_send_names - send name table entries to new neighbor
+ * tipc_link_names_xmit - send name table entries to new neighbor
*
* Send routine for bulk delivery of name table messages when contact
* with a new neighbor occurs. No link congestion checking is performed
@@ -874,7 +891,7 @@ static void tipc_link_recv_sync(struct tipc_node *n, struct sk_buff *buf)
* small enough not to require fragmentation.
* Called without any locks held.
*/
-void tipc_link_send_names(struct list_head *message_list, u32 dest)
+void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
{
struct tipc_node *n_ptr;
struct tipc_link *l_ptr;
@@ -909,13 +926,13 @@ void tipc_link_send_names(struct list_head *message_list, u32 dest)
}
/*
- * link_send_buf_fast: Entry for data messages where the
+ * tipc_link_xmit_fast: Entry for data messages where the
* destination link is known and the header is complete,
* inclusive total message length. Very time critical.
* Link is locked. Returns user data length.
*/
-static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
- u32 *used_max_pkt)
+static int tipc_link_xmit_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
+ u32 *used_max_pkt)
{
struct tipc_msg *msg = buf_msg(buf);
int res = msg_data_sz(msg);
@@ -931,18 +948,18 @@ static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
else
*used_max_pkt = l_ptr->max_pkt;
}
- return tipc_link_send_buf(l_ptr, buf); /* All other cases */
+ return __tipc_link_xmit(l_ptr, buf); /* All other cases */
}
/*
- * tipc_link_send_sections_fast: Entry for messages where the
+ * tipc_link_iovec_xmit_fast: Entry for messages where the
* destination processor is known and the header is complete,
* except for total message length.
* Returns user data length or errno.
*/
-int tipc_link_send_sections_fast(struct tipc_port *sender,
- struct iovec const *msg_sect,
- unsigned int len, u32 destaddr)
+int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len, u32 destaddr)
{
struct tipc_msg *hdr = &sender->phdr;
struct tipc_link *l_ptr;
@@ -968,8 +985,8 @@ again:
l_ptr = node->active_links[selector];
if (likely(l_ptr)) {
if (likely(buf)) {
- res = link_send_buf_fast(l_ptr, buf,
- &sender->max_pkt);
+ res = tipc_link_xmit_fast(l_ptr, buf,
+ &sender->max_pkt);
exit:
tipc_node_unlock(node);
read_unlock_bh(&tipc_net_lock);
@@ -995,24 +1012,21 @@ exit:
if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
goto again;
- return link_send_sections_long(sender, msg_sect, len,
- destaddr);
+ return tipc_link_iovec_long_xmit(sender, msg_sect,
+ len, destaddr);
}
tipc_node_unlock(node);
}
read_unlock_bh(&tipc_net_lock);
/* Couldn't find a link to the destination node */
- if (buf)
- return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
- if (res >= 0)
- return tipc_port_reject_sections(sender, hdr, msg_sect,
- len, TIPC_ERR_NO_NODE);
- return res;
+ kfree_skb(buf);
+ tipc_port_iovec_reject(sender, hdr, msg_sect, len, TIPC_ERR_NO_NODE);
+ return -ENETUNREACH;
}
/*
- * link_send_sections_long(): Entry for long messages where the
+ * tipc_link_iovec_long_xmit(): Entry for long messages where the
* destination node is known and the header is complete,
* inclusive total message length.
* Link and bearer congestion status have been checked to be ok,
@@ -1025,9 +1039,9 @@ exit:
*
* Returns user data length or errno.
*/
-static int link_send_sections_long(struct tipc_port *sender,
- struct iovec const *msg_sect,
- unsigned int len, u32 destaddr)
+static int tipc_link_iovec_long_xmit(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len, u32 destaddr)
{
struct tipc_link *l_ptr;
struct tipc_node *node;
@@ -1146,8 +1160,9 @@ error:
} else {
reject:
kfree_skb_list(buf_chain);
- return tipc_port_reject_sections(sender, hdr, msg_sect,
- len, TIPC_ERR_NO_NODE);
+ tipc_port_iovec_reject(sender, hdr, msg_sect, len,
+ TIPC_ERR_NO_NODE);
+ return -ENETUNREACH;
}
/* Append chain of fragments to send queue & send them */
@@ -1441,7 +1456,6 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
u32 seq_no;
u32 ackd;
u32 released = 0;
- int type;
head = head->next;
buf->next = NULL;
@@ -1463,9 +1477,9 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
if (unlikely(msg_non_seq(msg))) {
if (msg_user(msg) == LINK_CONFIG)
- tipc_disc_recv_msg(buf, b_ptr);
+ tipc_disc_rcv(buf, b_ptr);
else
- tipc_bclink_recv_pkt(buf);
+ tipc_bclink_rcv(buf);
continue;
}
@@ -1489,7 +1503,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
if ((n_ptr->block_setup & WAIT_PEER_DOWN) &&
msg_user(msg) == LINK_PROTOCOL &&
(msg_type(msg) == RESET_MSG ||
- msg_type(msg) == ACTIVATE_MSG) &&
+ msg_type(msg) == ACTIVATE_MSG) &&
!msg_redundant_link(msg))
n_ptr->block_setup &= ~WAIT_PEER_DOWN;
@@ -1508,7 +1522,6 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
while ((crs != l_ptr->next_out) &&
less_eq(buf_seqno(crs), ackd)) {
struct sk_buff *next = crs->next;
-
kfree_skb(crs);
crs = next;
released++;
@@ -1521,18 +1534,19 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
/* Try sending any messages link endpoint has pending */
if (unlikely(l_ptr->next_out))
tipc_link_push_queue(l_ptr);
+
if (unlikely(!list_empty(&l_ptr->waiting_ports)))
tipc_link_wakeup_ports(l_ptr, 0);
+
if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
l_ptr->stats.sent_acks++;
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}
- /* Now (finally!) process the incoming message */
-protocol_check:
+ /* Process the incoming packet */
if (unlikely(!link_working_working(l_ptr))) {
if (msg_user(msg) == LINK_PROTOCOL) {
- link_recv_proto_msg(l_ptr, buf);
+ tipc_link_proto_rcv(l_ptr, buf);
head = link_insert_deferred_queue(l_ptr, head);
tipc_node_unlock(n_ptr);
continue;
@@ -1561,67 +1575,65 @@ protocol_check:
l_ptr->next_in_no++;
if (unlikely(l_ptr->oldest_deferred_in))
head = link_insert_deferred_queue(l_ptr, head);
-deliver:
- if (likely(msg_isdata(msg))) {
- tipc_node_unlock(n_ptr);
- tipc_port_recv_msg(buf);
- continue;
+
+ /* Deliver packet/message to correct user: */
+ if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL)) {
+ if (!tipc_link_tunnel_rcv(n_ptr, &buf)) {
+ tipc_node_unlock(n_ptr);
+ continue;
+ }
+ msg = buf_msg(buf);
+ } else if (msg_user(msg) == MSG_FRAGMENTER) {
+ int rc;
+
+ l_ptr->stats.recv_fragments++;
+ rc = tipc_link_frag_rcv(&l_ptr->reasm_head,
+ &l_ptr->reasm_tail,
+ &buf);
+ if (rc == LINK_REASM_COMPLETE) {
+ l_ptr->stats.recv_fragmented++;
+ msg = buf_msg(buf);
+ } else {
+ if (rc == LINK_REASM_ERROR)
+ tipc_link_reset(l_ptr);
+ tipc_node_unlock(n_ptr);
+ continue;
+ }
}
+
switch (msg_user(msg)) {
- int ret;
+ case TIPC_LOW_IMPORTANCE:
+ case TIPC_MEDIUM_IMPORTANCE:
+ case TIPC_HIGH_IMPORTANCE:
+ case TIPC_CRITICAL_IMPORTANCE:
+ tipc_node_unlock(n_ptr);
+ tipc_port_rcv(buf);
+ continue;
case MSG_BUNDLER:
l_ptr->stats.recv_bundles++;
l_ptr->stats.recv_bundled += msg_msgcnt(msg);
tipc_node_unlock(n_ptr);
- tipc_link_recv_bundle(buf);
+ tipc_link_bundle_rcv(buf);
continue;
case NAME_DISTRIBUTOR:
n_ptr->bclink.recv_permitted = true;
tipc_node_unlock(n_ptr);
- tipc_named_recv(buf);
- continue;
- case BCAST_PROTOCOL:
- tipc_link_recv_sync(n_ptr, buf);
- tipc_node_unlock(n_ptr);
+ tipc_named_rcv(buf);
continue;
case CONN_MANAGER:
tipc_node_unlock(n_ptr);
- tipc_port_recv_proto_msg(buf);
+ tipc_port_proto_rcv(buf);
continue;
- case MSG_FRAGMENTER:
- l_ptr->stats.recv_fragments++;
- ret = tipc_link_recv_fragment(&l_ptr->reasm_head,
- &l_ptr->reasm_tail,
- &buf);
- if (ret == LINK_REASM_COMPLETE) {
- l_ptr->stats.recv_fragmented++;
- msg = buf_msg(buf);
- goto deliver;
- }
- if (ret == LINK_REASM_ERROR)
- tipc_link_reset(l_ptr);
- tipc_node_unlock(n_ptr);
- continue;
- case CHANGEOVER_PROTOCOL:
- type = msg_type(msg);
- if (tipc_link_tunnel_rcv(&l_ptr, &buf)) {
- msg = buf_msg(buf);
- seq_no = msg_seqno(msg);
- if (type == ORIGINAL_MSG)
- goto deliver;
- goto protocol_check;
- }
+ case BCAST_PROTOCOL:
+ tipc_link_sync_rcv(n_ptr, buf);
break;
default:
kfree_skb(buf);
- buf = NULL;
break;
}
tipc_node_unlock(n_ptr);
- tipc_net_route_msg(buf);
continue;
unlock_discard:
-
tipc_node_unlock(n_ptr);
discard:
kfree_skb(buf);
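
The receive loop now unwraps special envelopes first (changeover tunnel packets and fragments), and only then runs a single switch on msg_user() that hands the packet to its consumer: the data importance levels go to the port layer, while bundles, name-table updates, connection-manager messages and broadcast sync each go to their own handler, and anything else is dropped. A compact userspace sketch of that pre-process-then-dispatch shape; the enum values and handler names are illustrative, not the real constants.

#include <stdio.h>

enum user {
    DATA_LOW, DATA_HIGH,    /* stand-ins for the importance levels */
    BUNDLER, NAME_DISTR, CONN_MGR, BCAST_SYNC,
    FRAGMENT, TUNNEL, UNKNOWN
};

struct pkt {
    enum user user;
    int inner;      /* what a tunnel/fragment unwraps to, -1 if none yet */
};

/* Unwrap envelopes; return 0 if the packet was consumed here. */
static int unwrap(struct pkt *p)
{
    if (p->user == TUNNEL || p->user == FRAGMENT) {
        if (p->inner < 0)
            return 0;                   /* not complete yet */
        p->user = (enum user)p->inner;  /* expose inner message */
    }
    return 1;
}

static void deliver(struct pkt *p)
{
    if (!unwrap(p))
        return;

    switch (p->user) {
    case DATA_LOW:
    case DATA_HIGH:
        printf("-> port layer\n");
        break;
    case BUNDLER:
        printf("-> bundle receive\n");
        break;
    case NAME_DISTR:
        printf("-> name table\n");
        break;
    case CONN_MGR:
        printf("-> connection manager\n");
        break;
    case BCAST_SYNC:
        printf("-> broadcast sync\n");
        break;
    default:
        printf("dropped\n");
        break;
    }
}

int main(void)
{
    struct pkt a = { DATA_LOW, -1 };
    struct pkt b = { TUNNEL, NAME_DISTR };
    struct pkt c = { FRAGMENT, -1 };

    deliver(&a);
    deliver(&b);
    deliver(&c);    /* fragment not yet complete: nothing delivered */
    return 0;
}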
@@ -1688,7 +1700,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
u32 seq_no = buf_seqno(buf);
if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
- link_recv_proto_msg(l_ptr, buf);
+ tipc_link_proto_rcv(l_ptr, buf);
return;
}
@@ -1711,7 +1723,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
l_ptr->stats.deferred_recv++;
TIPC_SKB_CB(buf)->deferred = true;
if ((l_ptr->deferred_inqueue_sz % 16) == 1)
- tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
} else
l_ptr->stats.duplicates++;
}
@@ -1719,9 +1731,8 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
/*
* Send protocol message to the other endpoint.
*/
-void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
- int probe_msg, u32 gap, u32 tolerance,
- u32 priority, u32 ack_mtu)
+void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
+ u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
struct sk_buff *buf = NULL;
struct tipc_msg *msg = l_ptr->pmsg;
@@ -1820,7 +1831,7 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
* Note that network plane id propagates through the network, and may
* change at any time. The node with lowest address rules
*/
-static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
+static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
{
u32 rec_gap = 0;
u32 max_pkt_info;
@@ -1939,8 +1950,8 @@ static void link_recv_proto_msg(struct tipc_link *l_ptr, struct sk_buff *buf)
msg_last_bcast(msg));
if (rec_gap || (msg_probe(msg))) {
- tipc_link_send_proto_msg(l_ptr, STATE_MSG,
- 0, rec_gap, 0, 0, max_pkt_ack);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0,
+ 0, max_pkt_ack);
}
if (msg_seq_gap(msg)) {
l_ptr->stats.recv_nacks++;
@@ -1979,7 +1990,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
}
skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE);
skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length);
- tipc_link_send_buf(tunnel, buf);
+ __tipc_link_xmit(tunnel, buf);
}
@@ -2012,7 +2023,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
if (buf) {
skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
msg_set_size(&tunnel_hdr, INT_H_SIZE);
- tipc_link_send_buf(tunnel, buf);
+ __tipc_link_xmit(tunnel, buf);
} else {
pr_warn("%sunable to send changeover msg\n",
link_co_err);
@@ -2046,7 +2057,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
}
}
-/* tipc_link_dup_send_queue(): A second link has become active. Tunnel a
+/* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
* duplicate of the first link's send queue via the new link. This way, we
* are guaranteed that currently queued packets from a socket are delivered
* before future traffic from the same socket, even if this is using the
@@ -2055,7 +2066,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
* and sequence order is preserved per sender/receiver socket pair.
* Owner node is locked.
*/
-void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
+void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
struct tipc_link *tunnel)
{
struct sk_buff *iter;
@@ -2085,7 +2096,7 @@ void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE);
skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data,
length);
- tipc_link_send_buf(tunnel, outbuf);
+ __tipc_link_xmit(tunnel, outbuf);
if (!tipc_link_is_up(l_ptr))
return;
iter = iter->next;
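
tipc_link_dup_queue_xmit() wraps a copy of every packet still sitting in the first link's send queue in a DUPLICATE_MSG header and sends it over the newly active link, so packets already queued on one link cannot be overtaken by later traffic from the same socket on the other. A brief sketch of that copy-and-tunnel walk, with the queue layout and helpers as assumptions:

#include <stdio.h>

#define QLEN 3

struct pkt {
    char payload[32];
};

/* Copy of the old link's send queue, wrapped and re-sent on 'tunnel'. */
static void dup_queue_xmit(const struct pkt queue[], int n,
                           const char *tunnel)
{
    for (int i = 0; i < n; i++) {
        struct pkt copy = queue[i];     /* original stays queued */

        printf("DUPLICATE_MSG via %s: %s\n", tunnel, copy.payload);
    }
}

int main(void)
{
    struct pkt q[QLEN];

    for (int i = 0; i < QLEN; i++)
        snprintf(q[i].payload, sizeof(q[i].payload), "pkt %d", i);
    dup_queue_xmit(q, QLEN, "second-link");
    return 0;
}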
@@ -2112,89 +2123,114 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
return eb;
}
-/* tipc_link_tunnel_rcv(): Receive a tunneled packet, sent
- * via other link as result of a failover (ORIGINAL_MSG) or
- * a new active link (DUPLICATE_MSG). Failover packets are
- * returned to the active link for delivery upwards.
+
+
+/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
+ * Owner node is locked.
+ */
+static void tipc_link_dup_rcv(struct tipc_link *l_ptr,
+ struct sk_buff *t_buf)
+{
+ struct sk_buff *buf;
+
+ if (!tipc_link_is_up(l_ptr))
+ return;
+
+ buf = buf_extract(t_buf, INT_H_SIZE);
+ if (buf == NULL) {
+ pr_warn("%sfailed to extract inner dup pkt\n", link_co_err);
+ return;
+ }
+
+ /* Add buffer to deferred queue, if applicable: */
+ link_handle_out_of_seq_msg(l_ptr, buf);
+}
+
+/* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet
* Owner node is locked.
*/
-static int tipc_link_tunnel_rcv(struct tipc_link **l_ptr,
- struct sk_buff **buf)
+static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
+ struct sk_buff *t_buf)
{
- struct sk_buff *tunnel_buf = *buf;
- struct tipc_link *dest_link;
+ struct tipc_msg *t_msg = buf_msg(t_buf);
+ struct sk_buff *buf = NULL;
struct tipc_msg *msg;
- struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
- u32 msg_typ = msg_type(tunnel_msg);
- u32 msg_count = msg_msgcnt(tunnel_msg);
- u32 bearer_id = msg_bearer_id(tunnel_msg);
- if (bearer_id >= MAX_BEARERS)
- goto exit;
- dest_link = (*l_ptr)->owner->links[bearer_id];
- if (!dest_link)
- goto exit;
- if (dest_link == *l_ptr) {
- pr_err("Unexpected changeover message on link <%s>\n",
- (*l_ptr)->name);
- goto exit;
- }
- *l_ptr = dest_link;
- msg = msg_get_wrapped(tunnel_msg);
+ if (tipc_link_is_up(l_ptr))
+ tipc_link_reset(l_ptr);
- if (msg_typ == DUPLICATE_MSG) {
- if (less(msg_seqno(msg), mod(dest_link->next_in_no)))
- goto exit;
- *buf = buf_extract(tunnel_buf, INT_H_SIZE);
- if (*buf == NULL) {
- pr_warn("%sduplicate msg dropped\n", link_co_err);
+ /* First failover packet? */
+ if (l_ptr->exp_msg_count == START_CHANGEOVER)
+ l_ptr->exp_msg_count = msg_msgcnt(t_msg);
+
+ /* Should there be an inner packet? */
+ if (l_ptr->exp_msg_count) {
+ l_ptr->exp_msg_count--;
+ buf = buf_extract(t_buf, INT_H_SIZE);
+ if (buf == NULL) {
+ pr_warn("%sno inner failover pkt\n", link_co_err);
goto exit;
}
- kfree_skb(tunnel_buf);
- return 1;
- }
+ msg = buf_msg(buf);
- /* First original message ?: */
- if (tipc_link_is_up(dest_link)) {
- pr_info("%s<%s>, changeover initiated by peer\n", link_rst_msg,
- dest_link->name);
- tipc_link_reset(dest_link);
- dest_link->exp_msg_count = msg_count;
- if (!msg_count)
- goto exit;
- } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
- dest_link->exp_msg_count = msg_count;
- if (!msg_count)
+ if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) {
+ kfree_skb(buf);
+ buf = NULL;
goto exit;
+ }
+ if (msg_user(msg) == MSG_FRAGMENTER) {
+ l_ptr->stats.recv_fragments++;
+ tipc_link_frag_rcv(&l_ptr->reasm_head,
+ &l_ptr->reasm_tail,
+ &buf);
+ }
}
+exit:
+ if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) {
+ tipc_node_detach_link(l_ptr->owner, l_ptr);
+ kfree(l_ptr);
+ }
+ return buf;
+}
- /* Receive original message */
- if (dest_link->exp_msg_count == 0) {
- pr_warn("%sgot too many tunnelled messages\n", link_co_err);
+/* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent
+ * via other link as result of a failover (ORIGINAL_MSG) or
+ * a new active link (DUPLICATE_MSG). Failover packets are
+ * returned to the active link for delivery upwards.
+ * Owner node is locked.
+ */
+static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
+ struct sk_buff **buf)
+{
+ struct sk_buff *t_buf = *buf;
+ struct tipc_link *l_ptr;
+ struct tipc_msg *t_msg = buf_msg(t_buf);
+ u32 bearer_id = msg_bearer_id(t_msg);
+
+ *buf = NULL;
+
+ if (bearer_id >= MAX_BEARERS)
goto exit;
- }
- dest_link->exp_msg_count--;
- if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
+
+ l_ptr = n_ptr->links[bearer_id];
+ if (!l_ptr)
goto exit;
- } else {
- *buf = buf_extract(tunnel_buf, INT_H_SIZE);
- if (*buf != NULL) {
- kfree_skb(tunnel_buf);
- return 1;
- } else {
- pr_warn("%soriginal msg dropped\n", link_co_err);
- }
- }
+
+ if (msg_type(t_msg) == DUPLICATE_MSG)
+ tipc_link_dup_rcv(l_ptr, t_buf);
+ else if (msg_type(t_msg) == ORIGINAL_MSG)
+ *buf = tipc_link_failover_rcv(l_ptr, t_buf);
+ else
+ pr_warn("%sunknown tunnel pkt received\n", link_co_err);
exit:
- *buf = NULL;
- kfree_skb(tunnel_buf);
- return 0;
+ kfree_skb(t_buf);
+ return *buf != NULL;
}
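
Tunnel reception is now split by message type instead of one interleaved routine: tipc_link_tunnel_rcv() only locates the target link from the bearer id, then forwards DUPLICATE_MSG packets to tipc_link_dup_rcv() (which feeds the extracted packet into the normal out-of-sequence handling) and ORIGINAL_MSG packets to tipc_link_failover_rcv() (which counts down exp_msg_count and hands back at most one inner packet for delivery). A hedged sketch of that dispatcher, with extraction and the per-type handlers reduced to stubs:

#include <stdbool.h>
#include <stdio.h>

enum tunnel_type { DUPLICATE_MSG, ORIGINAL_MSG, BOGUS_MSG };

struct tpkt {
    enum tunnel_type type;
    const char *inner;      /* wrapped packet, may be NULL */
};

static void dup_rcv(const char *inner)
{
    /* Real code extracts the skb and runs out-of-sequence handling. */
    printf("duplicate: defer/accept '%s'\n", inner);
}

static const char *failover_rcv(const char *inner)
{
    /* Real code tracks exp_msg_count and may reassemble fragments. */
    printf("failover: unwrap '%s'\n", inner);
    return inner;           /* handed back to the receive loop */
}

/* Returns true and sets *out when an inner packet must be delivered. */
static bool tunnel_rcv(const struct tpkt *t, const char **out)
{
    *out = NULL;

    switch (t->type) {
    case DUPLICATE_MSG:
        dup_rcv(t->inner);
        break;
    case ORIGINAL_MSG:
        *out = failover_rcv(t->inner);
        break;
    default:
        printf("unknown tunnel pkt dropped\n");
        break;
    }
    return *out != NULL;
}

int main(void)
{
    struct tpkt a = { DUPLICATE_MSG, "seq 17" };
    struct tpkt b = { ORIGINAL_MSG, "seq 18" };
    const char *deliver;

    tunnel_rcv(&a, &deliver);
    if (tunnel_rcv(&b, &deliver))
        printf("deliver '%s'\n", deliver);
    return 0;
}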
/*
* Bundler functionality:
*/
-void tipc_link_recv_bundle(struct sk_buff *buf)
+void tipc_link_bundle_rcv(struct sk_buff *buf)
{
u32 msgcount = msg_msgcnt(buf_msg(buf));
u32 pos = INT_H_SIZE;
@@ -2217,11 +2253,11 @@ void tipc_link_recv_bundle(struct sk_buff *buf)
*/
/*
- * link_send_long_buf: Entry for buffers needing fragmentation.
+ * tipc_link_frag_xmit: Entry for buffers needing fragmentation.
* The buffer is complete, inclusive total message length.
* Returns user data length.
*/
-static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
+static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
{
struct sk_buff *buf_chain = NULL;
struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
@@ -2284,12 +2320,11 @@ static int link_send_long_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
return dsz;
}
-/*
- * tipc_link_recv_fragment(): Called with node lock on. Returns
+/* tipc_link_frag_rcv(): Called with node lock on. Returns
* the reassembled buffer if message is complete.
*/
-int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
- struct sk_buff **fbuf)
+int tipc_link_frag_rcv(struct sk_buff **head, struct sk_buff **tail,
+ struct sk_buff **fbuf)
{
struct sk_buff *frag = *fbuf;
struct tipc_msg *msg = buf_msg(frag);
@@ -2303,6 +2338,7 @@ int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
goto out_free;
*head = frag;
skb_frag_list_init(*head);
+ *fbuf = NULL;
return 0;
} else if (*head &&
skb_try_coalesce(*head, frag, &headstolen, &delta)) {
@@ -2322,10 +2358,12 @@ int tipc_link_recv_fragment(struct sk_buff **head, struct sk_buff **tail,
*tail = *head = NULL;
return LINK_REASM_COMPLETE;
}
+ *fbuf = NULL;
return 0;
out_free:
pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
kfree_skb(*fbuf);
+ *fbuf = NULL;
return LINK_REASM_ERROR;
}
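
With the added *fbuf = NULL assignments, tipc_link_frag_rcv() now owns the fragment in every outcome: whether it starts a new reassembly head, coalesces into an existing one, completes a message, or fails, the caller's pointer is cleared, so the caller can no longer double-free or reuse a consumed fragment. A minimal sketch of that consume-and-clear contract; the return codes mirror the LINK_REASM_* values, the rest is illustrative.

#include <stdio.h>
#include <stdlib.h>

#define REASM_ERROR    -1
#define REASM_MORE      0
#define REASM_COMPLETE  1

struct frag {
    int last;   /* non-zero on the final fragment */
};

/* Consumes *fp in every case and clears the caller's pointer. */
static int frag_rcv(struct frag **head, struct frag **fp)
{
    struct frag *f = *fp;
    int rc;

    *fp = NULL;             /* caller must not touch it again */

    if (!*head) {
        *head = f;          /* first fragment starts reassembly */
        return REASM_MORE;
    }
    rc = f->last ? REASM_COMPLETE : REASM_MORE;
    free(f);                /* coalesced into *head (sketch) */
    if (rc == REASM_COMPLETE) {
        free(*head);
        *head = NULL;
    }
    return rc;
}

int main(void)
{
    struct frag *head = NULL;
    struct frag *a = calloc(1, sizeof(*a));
    struct frag *b = calloc(1, sizeof(*b));

    b->last = 1;
    printf("rc=%d a=%p\n", frag_rcv(&head, &a), (void *)a);
    printf("rc=%d b=%p\n", frag_rcv(&head, &b), (void *)b);
    return 0;
}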
@@ -2359,35 +2397,40 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
}
-/**
- * link_find_link - locate link by name
- * @name: ptr to link name string
- * @node: ptr to area to be filled with ptr to associated node
- *
+/* tipc_link_find_owner - locate owner node of link by link's name
+ * @name: pointer to link name string
+ * @bearer_id: pointer to index in 'node->links' array where the link was found.
* Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
* this also prevents link deletion.
*
- * Returns pointer to link (or 0 if invalid link name).
+ * Returns pointer to node owning the link, or 0 if no matching link is found.
*/
-static struct tipc_link *link_find_link(const char *name,
- struct tipc_node **node)
+static struct tipc_node *tipc_link_find_owner(const char *link_name,
+ unsigned int *bearer_id)
{
struct tipc_link *l_ptr;
struct tipc_node *n_ptr;
+ struct tipc_node *tmp_n_ptr;
+ struct tipc_node *found_node = 0;
+
int i;
- list_for_each_entry(n_ptr, &tipc_node_list, list) {
+ *bearer_id = 0;
+ list_for_each_entry_safe(n_ptr, tmp_n_ptr, &tipc_node_list, list) {
+ tipc_node_lock(n_ptr);
for (i = 0; i < MAX_BEARERS; i++) {
l_ptr = n_ptr->links[i];
- if (l_ptr && !strcmp(l_ptr->name, name))
- goto found;
+ if (l_ptr && !strcmp(l_ptr->name, link_name)) {
+ *bearer_id = i;
+ found_node = n_ptr;
+ break;
+ }
}
+ tipc_node_unlock(n_ptr);
+ if (found_node)
+ break;
}
- l_ptr = NULL;
- n_ptr = NULL;
-found:
- *node = n_ptr;
- return l_ptr;
+ return found_node;
}
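
link_find_link() used to return a bare link pointer found without holding the owner's lock; tipc_link_find_owner() instead takes each node lock while scanning its links[] array and returns the owning node plus the bearer index, leaving the caller to re-take the node lock and re-check node->links[bearer_id] before use, as link_cmd_set_value() and the stats paths below now do. A userspace sketch of that locked scan, with pthread mutexes standing in for tipc_node_lock():

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_BEARERS 3

struct node {
    pthread_mutex_t lock;
    const char *links[MAX_BEARERS];     /* link names, NULL if unused */
};

/* Returns the owning node and fills *bearer_id, or NULL if not found. */
static struct node *find_owner(struct node *nodes, int n_nodes,
                               const char *name, unsigned int *bearer_id)
{
    struct node *found = NULL;

    *bearer_id = 0;
    for (int n = 0; n < n_nodes && !found; n++) {
        pthread_mutex_lock(&nodes[n].lock);
        for (int i = 0; i < MAX_BEARERS; i++) {
            if (nodes[n].links[i] &&
                !strcmp(nodes[n].links[i], name)) {
                *bearer_id = i;
                found = &nodes[n];
                break;
            }
        }
        pthread_mutex_unlock(&nodes[n].lock);
    }
    return found;
}

int main(void)
{
    static struct node nodes[2] = {
        { PTHREAD_MUTEX_INITIALIZER, { "eth0-A", NULL, NULL } },
        { PTHREAD_MUTEX_INITIALIZER, { NULL, "eth1-B", NULL } },
    };
    unsigned int bearer;
    struct node *owner = find_owner(nodes, 2, "eth1-B", &bearer);

    if (owner)  /* callers re-lock and re-check links[bearer] */
        printf("found on node %ld, bearer %u\n",
               (long)(owner - nodes), bearer);
    return 0;
}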
/**
@@ -2429,32 +2472,33 @@ static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
struct tipc_link *l_ptr;
struct tipc_bearer *b_ptr;
struct tipc_media *m_ptr;
+ int bearer_id;
int res = 0;
- l_ptr = link_find_link(name, &node);
- if (l_ptr) {
- /*
- * acquire node lock for tipc_link_send_proto_msg().
- * see "TIPC locking policy" in net.c.
- */
+ node = tipc_link_find_owner(name, &bearer_id);
+ if (node) {
tipc_node_lock(node);
- switch (cmd) {
- case TIPC_CMD_SET_LINK_TOL:
- link_set_supervision_props(l_ptr, new_value);
- tipc_link_send_proto_msg(l_ptr,
- STATE_MSG, 0, 0, new_value, 0, 0);
- break;
- case TIPC_CMD_SET_LINK_PRI:
- l_ptr->priority = new_value;
- tipc_link_send_proto_msg(l_ptr,
- STATE_MSG, 0, 0, 0, new_value, 0);
- break;
- case TIPC_CMD_SET_LINK_WINDOW:
- tipc_link_set_queue_limits(l_ptr, new_value);
- break;
- default:
- res = -EINVAL;
- break;
+ l_ptr = node->links[bearer_id];
+
+ if (l_ptr) {
+ switch (cmd) {
+ case TIPC_CMD_SET_LINK_TOL:
+ link_set_supervision_props(l_ptr, new_value);
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
+ new_value, 0, 0);
+ break;
+ case TIPC_CMD_SET_LINK_PRI:
+ l_ptr->priority = new_value;
+ tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
+ 0, new_value, 0);
+ break;
+ case TIPC_CMD_SET_LINK_WINDOW:
+ tipc_link_set_queue_limits(l_ptr, new_value);
+ break;
+ default:
+ res = -EINVAL;
+ break;
+ }
}
tipc_node_unlock(node);
return res;
@@ -2549,6 +2593,7 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
char *link_name;
struct tipc_link *l_ptr;
struct tipc_node *node;
+ unsigned int bearer_id;
if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
@@ -2559,15 +2604,19 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
return tipc_cfg_reply_error_string("link not found");
return tipc_cfg_reply_none();
}
-
read_lock_bh(&tipc_net_lock);
- l_ptr = link_find_link(link_name, &node);
- if (!l_ptr) {
+ node = tipc_link_find_owner(link_name, &bearer_id);
+ if (!node) {
read_unlock_bh(&tipc_net_lock);
return tipc_cfg_reply_error_string("link not found");
}
-
tipc_node_lock(node);
+ l_ptr = node->links[bearer_id];
+ if (!l_ptr) {
+ tipc_node_unlock(node);
+ read_unlock_bh(&tipc_net_lock);
+ return tipc_cfg_reply_error_string("link not found");
+ }
link_reset_statistics(l_ptr);
tipc_node_unlock(node);
read_unlock_bh(&tipc_net_lock);
@@ -2597,18 +2646,27 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
struct tipc_node *node;
char *status;
u32 profile_total = 0;
+ unsigned int bearer_id;
int ret;
if (!strcmp(name, tipc_bclink_name))
return tipc_bclink_stats(buf, buf_size);
read_lock_bh(&tipc_net_lock);
- l = link_find_link(name, &node);
- if (!l) {
+ node = tipc_link_find_owner(name, &bearer_id);
+ if (!node) {
read_unlock_bh(&tipc_net_lock);
return 0;
}
tipc_node_lock(node);
+
+ l = node->links[bearer_id];
+ if (!l) {
+ tipc_node_unlock(node);
+ read_unlock_bh(&tipc_net_lock);
+ return 0;
+ }
+
s = &l->stats;
if (tipc_link_is_active(l))
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 3b6aa65..8c0b49b 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -1,7 +1,7 @@
/*
* net/tipc/link.h: Include file for TIPC link code
*
- * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 1995-2006, 2013, Ericsson AB
* Copyright (c) 2004-2005, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -40,27 +40,28 @@
#include "msg.h"
#include "node.h"
-/*
- * Link reassembly status codes
+/* Link reassembly status codes
*/
#define LINK_REASM_ERROR -1
#define LINK_REASM_COMPLETE 1
-/*
- * Out-of-range value for link sequence numbers
+/* Out-of-range value for link sequence numbers
*/
#define INVALID_LINK_SEQ 0x10000
-/*
- * Link states
+/* Link working states
*/
#define WORKING_WORKING 560810u
#define WORKING_UNKNOWN 560811u
#define RESET_UNKNOWN 560812u
#define RESET_RESET 560813u
-/*
- * Starting value for maximum packet size negotiation on unicast links
+/* Link endpoint execution states
+ */
+#define LINK_STARTED 0x0001
+#define LINK_STOPPED 0x0002
+
+/* Starting value for maximum packet size negotiation on unicast links
* (unless bearer MTU is less)
*/
#define MAX_PKT_DEFAULT 1500
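
The single 'started' boolean becomes an unsigned 'flags' word carrying LINK_STARTED and LINK_STOPPED bits, which is what lets tipc_link_failover_rcv() defer the final kfree of a stopped link until its failover traffic has drained. A tiny sketch of the bit-flag pattern; the flag values are copied from the header, the struct around them is illustrative.

#include <stdio.h>

#define LINK_STARTED    0x0001
#define LINK_STOPPED    0x0002

struct link {
    unsigned int flags;
    int exp_msg_count;
};

int main(void)
{
    struct link l = { .flags = LINK_STARTED, .exp_msg_count = 2 };

    l.flags |= LINK_STOPPED;    /* delete requested mid-failover */
    l.exp_msg_count = 0;        /* last tunnelled packet arrived */

    if (!l.exp_msg_count && (l.flags & LINK_STOPPED))
        printf("safe to detach and free the link now\n");
    return 0;
}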
@@ -102,8 +103,7 @@ struct tipc_stats {
* @media_addr: media address to use when sending messages over link
* @timer: link timer
* @owner: pointer to peer node
- * @link_list: adjacent links in bearer's list of links
- * @started: indicates if link has been started
+ * @flags: execution state flags for link endpoint instance
* @checkpoint: reference point for triggering link continuity checking
* @peer_session: link session # being used by peer end of link
* @peer_bearer_id: bearer id used by link's peer endpoint
@@ -149,10 +149,9 @@ struct tipc_link {
struct tipc_media_addr media_addr;
struct timer_list timer;
struct tipc_node *owner;
- struct list_head link_list;
/* Management and link supervision data */
- int started;
+ unsigned int flags;
u32 checkpoint;
u32 peer_session;
u32 peer_bearer_id;
@@ -215,10 +214,9 @@ struct tipc_port;
struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
struct tipc_bearer *b_ptr,
const struct tipc_media_addr *media_addr);
-void tipc_link_delete(struct tipc_link *l_ptr);
+void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down);
void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
-void tipc_link_dup_send_queue(struct tipc_link *l_ptr,
- struct tipc_link *dest);
+void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
void tipc_link_reset_fragments(struct tipc_link *l_ptr);
int tipc_link_is_up(struct tipc_link *l_ptr);
int tipc_link_is_active(struct tipc_link *l_ptr);
@@ -231,23 +229,24 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
int req_tlv_space);
void tipc_link_reset(struct tipc_link *l_ptr);
-int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
-void tipc_link_send_names(struct list_head *message_list, u32 dest);
+void tipc_link_reset_list(unsigned int bearer_id);
+int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
+void tipc_link_names_xmit(struct list_head *message_list, u32 dest);
+int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
-int tipc_link_send_sections_fast(struct tipc_port *sender,
- struct iovec const *msg_sect,
- unsigned int len, u32 destnode);
-void tipc_link_recv_bundle(struct sk_buff *buf);
-int tipc_link_recv_fragment(struct sk_buff **reasm_head,
- struct sk_buff **reasm_tail,
- struct sk_buff **fbuf);
-void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ, int prob,
- u32 gap, u32 tolerance, u32 priority,
- u32 acked_mtu);
+int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len, u32 destnode);
+void tipc_link_bundle_rcv(struct sk_buff *buf);
+int tipc_link_frag_rcv(struct sk_buff **reasm_head,
+ struct sk_buff **reasm_tail,
+ struct sk_buff **fbuf);
+void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
+ u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
void tipc_link_push_queue(struct tipc_link *l_ptr);
u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
- struct sk_buff *buf);
+ struct sk_buff *buf);
void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all);
void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
void tipc_link_retransmit(struct tipc_link *l_ptr,
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index e0d0805..893c49a 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -138,7 +138,7 @@ static void named_cluster_distribute(struct sk_buff *buf)
if (!buf_copy)
break;
msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
- tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
+ tipc_link_xmit(buf_copy, n_ptr->addr, n_ptr->addr);
}
}
@@ -262,7 +262,7 @@ void tipc_named_node_up(unsigned long nodearg)
named_distribute(&message_list, node, &publ_zone, max_item_buf);
read_unlock_bh(&tipc_nametbl_lock);
- tipc_link_send_names(&message_list, node);
+ tipc_link_names_xmit(&message_list, node);
}
/**
@@ -293,9 +293,9 @@ static void named_purge_publ(struct publication *publ)
}
/**
- * tipc_named_recv - process name table update message sent by another node
+ * tipc_named_rcv - process name table update message sent by another node
*/
-void tipc_named_recv(struct sk_buff *buf)
+void tipc_named_rcv(struct sk_buff *buf)
{
struct publication *publ;
struct tipc_msg *msg = buf_msg(buf);
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index 1e41bdd..9b312cc 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -42,7 +42,7 @@
void tipc_named_publish(struct publication *publ);
void tipc_named_withdraw(struct publication *publ);
void tipc_named_node_up(unsigned long node);
-void tipc_named_recv(struct sk_buff *buf);
+void tipc_named_rcv(struct sk_buff *buf);
void tipc_named_reinit(void);
#endif
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7d305ec..31b606e 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -146,19 +146,19 @@ void tipc_net_route_msg(struct sk_buff *buf)
if (tipc_in_scope(dnode, tipc_own_addr)) {
if (msg_isdata(msg)) {
if (msg_mcast(msg))
- tipc_port_recv_mcast(buf, NULL);
+ tipc_port_mcast_rcv(buf, NULL);
else if (msg_destport(msg))
- tipc_port_recv_msg(buf);
+ tipc_port_rcv(buf);
else
net_route_named_msg(buf);
return;
}
switch (msg_user(msg)) {
case NAME_DISTRIBUTOR:
- tipc_named_recv(buf);
+ tipc_named_rcv(buf);
break;
case CONN_MANAGER:
- tipc_port_recv_proto_msg(buf);
+ tipc_port_proto_rcv(buf);
break;
default:
kfree_skb(buf);
@@ -168,7 +168,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
/* Handle message for another node */
skb_trim(buf, msg_size(msg));
- tipc_link_send(buf, dnode, msg_link_selector(msg));
+ tipc_link_xmit(buf, dnode, msg_link_selector(msg));
}
void tipc_net_start(u32 addr)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index efe4d41..0b0f6c7 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -162,7 +162,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
pr_info("New link <%s> becomes standby\n", l_ptr->name);
return;
}
- tipc_link_dup_send_queue(active[0], l_ptr);
+ tipc_link_dup_queue_xmit(active[0], l_ptr);
if (l_ptr->priority == active[0]->priority) {
active[0] = l_ptr;
return;
@@ -249,9 +249,15 @@ void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
{
- n_ptr->links[l_ptr->b_ptr->identity] = NULL;
- atomic_dec(&tipc_num_links);
- n_ptr->link_cnt--;
+ int i;
+
+ for (i = 0; i < MAX_BEARERS; i++) {
+ if (l_ptr != n_ptr->links[i])
+ continue;
+ n_ptr->links[i] = NULL;
+ atomic_dec(&tipc_num_links);
+ n_ptr->link_cnt--;
+ }
}
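
tipc_node_detach_link() no longer indexes links[] through l_ptr->b_ptr->identity; it scans the whole array for the matching pointer, so a link can still be detached after its bearer reference is gone. A short sketch of that scan-by-pointer form, with the array size and counters as stand-ins:

#include <stdio.h>

#define MAX_BEARERS 3

struct node {
    void *links[MAX_BEARERS];
    int link_cnt;
};

static void detach_link(struct node *n, void *link)
{
    for (int i = 0; i < MAX_BEARERS; i++) {
        if (n->links[i] != link)
            continue;
        n->links[i] = NULL;     /* clear wherever it was attached */
        n->link_cnt--;
    }
}

int main(void)
{
    int a, b;
    struct node n = { .links = { &a, &b, NULL }, .link_cnt = 2 };

    detach_link(&n, &b);
    printf("links left: %d\n", n.link_cnt);
    return 0;
}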
static void node_established_contact(struct tipc_node *n_ptr)
diff --git a/net/tipc/port.c b/net/tipc/port.c
index b742b26..c7c2b54 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -87,10 +87,11 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
}
/**
- * tipc_multicast - send a multicast message to local and remote destinations
+ * tipc_port_mcast_xmit - send a multicast message to local and remote
+ * destinations
*/
-int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
- struct iovec const *msg_sect, unsigned int len)
+int tipc_port_mcast_xmit(u32 ref, struct tipc_name_seq const *seq,
+ struct iovec const *msg_sect, unsigned int len)
{
struct tipc_msg *hdr;
struct sk_buff *buf;
@@ -131,7 +132,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
return -ENOMEM;
}
}
- res = tipc_bclink_send_msg(buf);
+ res = tipc_bclink_xmit(buf);
if ((res < 0) && (dports.count != 0))
kfree_skb(ibuf);
} else {
@@ -140,7 +141,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
if (res >= 0) {
if (ibuf)
- tipc_port_recv_mcast(ibuf, &dports);
+ tipc_port_mcast_rcv(ibuf, &dports);
} else {
tipc_port_list_free(&dports);
}
@@ -148,11 +149,11 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq,
}
/**
- * tipc_port_recv_mcast - deliver multicast message to all destination ports
+ * tipc_port_mcast_rcv - deliver multicast message to all destination ports
*
* If there is no port list, perform a lookup to create one
*/
-void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
+void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
{
struct tipc_msg *msg;
struct tipc_port_list dports = {0, NULL, };
@@ -176,7 +177,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
msg_set_destnode(msg, tipc_own_addr);
if (dp->count == 1) {
msg_set_destport(msg, dp->ports[0]);
- tipc_port_recv_msg(buf);
+ tipc_port_rcv(buf);
tipc_port_list_free(dp);
return;
}
@@ -191,7 +192,7 @@ void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp)
if ((index == 0) && (cnt != 0))
item = item->next;
msg_set_destport(buf_msg(b), item->ports[index]);
- tipc_port_recv_msg(b);
+ tipc_port_rcv(b);
}
}
exit:
@@ -422,17 +423,17 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
/* send returned message & dispose of rejected message */
src_node = msg_prevnode(msg);
if (in_own_node(src_node))
- tipc_port_recv_msg(rbuf);
+ tipc_port_rcv(rbuf);
else
- tipc_link_send(rbuf, src_node, msg_link_selector(rmsg));
+ tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg));
exit:
kfree_skb(buf);
return data_sz;
}
-int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
- struct iovec const *msg_sect, unsigned int len,
- int err)
+int tipc_port_iovec_reject(struct tipc_port *p_ptr, struct tipc_msg *hdr,
+ struct iovec const *msg_sect, unsigned int len,
+ int err)
{
struct sk_buff *buf;
int res;
@@ -519,7 +520,7 @@ static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 er
return buf;
}
-void tipc_port_recv_proto_msg(struct sk_buff *buf)
+void tipc_port_proto_rcv(struct sk_buff *buf)
{
struct tipc_msg *msg = buf_msg(buf);
struct tipc_port *p_ptr;
@@ -760,7 +761,7 @@ int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
return res;
}
-int tipc_connect(u32 ref, struct tipc_portid const *peer)
+int tipc_port_connect(u32 ref, struct tipc_portid const *peer)
{
struct tipc_port *p_ptr;
int res;
@@ -768,17 +769,17 @@ int tipc_connect(u32 ref, struct tipc_portid const *peer)
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- res = __tipc_connect(ref, p_ptr, peer);
+ res = __tipc_port_connect(ref, p_ptr, peer);
tipc_port_unlock(p_ptr);
return res;
}
/*
- * __tipc_connect - connect to a remote peer
+ * __tipc_port_connect - connect to a remote peer
*
* Port must be locked.
*/
-int __tipc_connect(u32 ref, struct tipc_port *p_ptr,
+int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
struct tipc_portid const *peer)
{
struct tipc_msg *msg;
@@ -815,7 +816,7 @@ exit:
*
* Port must be locked.
*/
-int __tipc_disconnect(struct tipc_port *tp_ptr)
+int __tipc_port_disconnect(struct tipc_port *tp_ptr)
{
if (tp_ptr->connected) {
tp_ptr->connected = 0;
@@ -828,10 +829,10 @@ int __tipc_disconnect(struct tipc_port *tp_ptr)
}
/*
- * tipc_disconnect(): Disconnect port form peer.
+ * tipc_port_disconnect(): Disconnect port from peer.
* This is a node local operation.
*/
-int tipc_disconnect(u32 ref)
+int tipc_port_disconnect(u32 ref)
{
struct tipc_port *p_ptr;
int res;
@@ -839,15 +840,15 @@ int tipc_disconnect(u32 ref)
p_ptr = tipc_port_lock(ref);
if (!p_ptr)
return -EINVAL;
- res = __tipc_disconnect(p_ptr);
+ res = __tipc_port_disconnect(p_ptr);
tipc_port_unlock(p_ptr);
return res;
}
/*
- * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect
+ * tipc_port_shutdown(): Send a SHUTDOWN msg to peer and disconnect
*/
-int tipc_shutdown(u32 ref)
+int tipc_port_shutdown(u32 ref)
{
struct tipc_port *p_ptr;
struct sk_buff *buf = NULL;
@@ -859,13 +860,13 @@ int tipc_shutdown(u32 ref)
buf = port_build_peer_abort_msg(p_ptr, TIPC_CONN_SHUTDOWN);
tipc_port_unlock(p_ptr);
tipc_net_route_msg(buf);
- return tipc_disconnect(ref);
+ return tipc_port_disconnect(ref);
}
/**
- * tipc_port_recv_msg - receive message from lower layer and deliver to port user
+ * tipc_port_rcv - receive message from lower layer and deliver to port user
*/
-int tipc_port_recv_msg(struct sk_buff *buf)
+int tipc_port_rcv(struct sk_buff *buf)
{
struct tipc_port *p_ptr;
struct tipc_msg *msg = buf_msg(buf);
@@ -894,19 +895,19 @@ int tipc_port_recv_msg(struct sk_buff *buf)
}
/*
- * tipc_port_recv_sections(): Concatenate and deliver sectioned
- * message for this node.
+ * tipc_port_iovec_rcv: Concatenate and deliver sectioned
+ * message for this node.
*/
-static int tipc_port_recv_sections(struct tipc_port *sender,
- struct iovec const *msg_sect,
- unsigned int len)
+static int tipc_port_iovec_rcv(struct tipc_port *sender,
+ struct iovec const *msg_sect,
+ unsigned int len)
{
struct sk_buff *buf;
int res;
res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
if (likely(buf))
- tipc_port_recv_msg(buf);
+ tipc_port_rcv(buf);
return res;
}
@@ -927,10 +928,10 @@ int tipc_send(u32 ref, struct iovec const *msg_sect, unsigned int len)
if (!tipc_port_congested(p_ptr)) {
destnode = port_peernode(p_ptr);
if (likely(!in_own_node(destnode)))
- res = tipc_link_send_sections_fast(p_ptr, msg_sect,
- len, destnode);
+ res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
+ destnode);
else
- res = tipc_port_recv_sections(p_ptr, msg_sect, len);
+ res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
if (likely(res != -ELINKCONG)) {
p_ptr->congested = 0;
@@ -974,13 +975,13 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
if (likely(destport || destnode)) {
if (likely(in_own_node(destnode)))
- res = tipc_port_recv_sections(p_ptr, msg_sect, len);
+ res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
else if (tipc_own_addr)
- res = tipc_link_send_sections_fast(p_ptr, msg_sect,
- len, destnode);
+ res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
+ destnode);
else
- res = tipc_port_reject_sections(p_ptr, msg, msg_sect,
- len, TIPC_ERR_NO_NODE);
+ res = tipc_port_iovec_reject(p_ptr, msg, msg_sect,
+ len, TIPC_ERR_NO_NODE);
if (likely(res != -ELINKCONG)) {
if (res > 0)
p_ptr->sent++;
@@ -991,8 +992,8 @@ int tipc_send2name(u32 ref, struct tipc_name const *name, unsigned int domain,
}
return -ELINKCONG;
}
- return tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
- TIPC_ERR_NO_NAME);
+ return tipc_port_iovec_reject(p_ptr, msg, msg_sect, len,
+ TIPC_ERR_NO_NAME);
}
/**
@@ -1017,12 +1018,12 @@ int tipc_send2port(u32 ref, struct tipc_portid const *dest,
msg_set_hdr_sz(msg, BASIC_H_SIZE);
if (in_own_node(dest->node))
- res = tipc_port_recv_sections(p_ptr, msg_sect, len);
+ res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
else if (tipc_own_addr)
- res = tipc_link_send_sections_fast(p_ptr, msg_sect, len,
- dest->node);
+ res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
+ dest->node);
else
- res = tipc_port_reject_sections(p_ptr, msg, msg_sect, len,
+ res = tipc_port_iovec_reject(p_ptr, msg, msg_sect, len,
TIPC_ERR_NO_NODE);
if (likely(res != -ELINKCONG)) {
if (res > 0)
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 34f12bd..3ec3e94 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -132,25 +132,25 @@ int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
struct tipc_name_seq const *name_seq);
-int tipc_connect(u32 portref, struct tipc_portid const *port);
+int tipc_port_connect(u32 portref, struct tipc_portid const *port);
-int tipc_disconnect(u32 portref);
+int tipc_port_disconnect(u32 portref);
-int tipc_shutdown(u32 ref);
+int tipc_port_shutdown(u32 ref);
/*
* The following routines require that the port be locked on entry
*/
-int __tipc_disconnect(struct tipc_port *tp_ptr);
-int __tipc_connect(u32 ref, struct tipc_port *p_ptr,
+int __tipc_port_disconnect(struct tipc_port *tp_ptr);
+int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
struct tipc_portid const *peer);
int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
/*
* TIPC messaging routines
*/
-int tipc_port_recv_msg(struct sk_buff *buf);
+int tipc_port_rcv(struct sk_buff *buf);
int tipc_send(u32 portref, struct iovec const *msg_sect, unsigned int len);
int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
@@ -159,15 +159,15 @@ int tipc_send2name(u32 portref, struct tipc_name const *name, u32 domain,
int tipc_send2port(u32 portref, struct tipc_portid const *dest,
struct iovec const *msg_sect, unsigned int len);
-int tipc_multicast(u32 portref, struct tipc_name_seq const *seq,
- struct iovec const *msg, unsigned int len);
+int tipc_port_mcast_xmit(u32 portref, struct tipc_name_seq const *seq,
+ struct iovec const *msg, unsigned int len);
-int tipc_port_reject_sections(struct tipc_port *p_ptr, struct tipc_msg *hdr,
- struct iovec const *msg_sect, unsigned int len,
- int err);
+int tipc_port_iovec_reject(struct tipc_port *p_ptr, struct tipc_msg *hdr,
+ struct iovec const *msg_sect, unsigned int len,
+ int err);
struct sk_buff *tipc_port_get_ports(void);
-void tipc_port_recv_proto_msg(struct sk_buff *buf);
-void tipc_port_recv_mcast(struct sk_buff *buf, struct tipc_port_list *dp);
+void tipc_port_proto_rcv(struct sk_buff *buf);
+void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp);
void tipc_port_reinit(void);
/**
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index a4cf274..336e18d 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -60,8 +60,8 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
static void wakeupdispatch(struct tipc_port *tport);
static void tipc_data_ready(struct sock *sk, int len);
static void tipc_write_space(struct sock *sk);
-static int release(struct socket *sock);
-static int accept(struct socket *sock, struct socket *new_sock, int flags);
+static int tipc_release(struct socket *sock);
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
static const struct proto_ops packet_ops;
static const struct proto_ops stream_ops;
@@ -254,7 +254,7 @@ int tipc_sock_create_local(int type, struct socket **res)
*/
void tipc_sock_release_local(struct socket *sock)
{
- release(sock);
+ tipc_release(sock);
sock->ops = NULL;
sock_release(sock);
}
@@ -280,7 +280,7 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
if (ret < 0)
return ret;
- ret = accept(sock, *newsock, flags);
+ ret = tipc_accept(sock, *newsock, flags);
if (ret < 0) {
sock_release(*newsock);
return ret;
@@ -290,7 +290,7 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
}
/**
- * release - destroy a TIPC socket
+ * tipc_release - destroy a TIPC socket
* @sock: socket to destroy
*
* This routine cleans up any messages that are still queued on the socket.
@@ -305,7 +305,7 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
*
* Returns 0 on success, errno otherwise
*/
-static int release(struct socket *sock)
+static int tipc_release(struct socket *sock)
{
struct sock *sk = sock->sk;
struct tipc_port *tport;
@@ -336,7 +336,7 @@ static int release(struct socket *sock)
if ((sock->state == SS_CONNECTING) ||
(sock->state == SS_CONNECTED)) {
sock->state = SS_DISCONNECTING;
- tipc_disconnect(tport->ref);
+ tipc_port_disconnect(tport->ref);
}
tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
}
@@ -362,7 +362,7 @@ static int release(struct socket *sock)
}
/**
- * bind - associate or disassocate TIPC name(s) with a socket
+ * tipc_bind - associate or disassociate TIPC name(s) with a socket
* @sock: socket structure
* @uaddr: socket address describing name(s) and desired operation
* @uaddr_len: size of socket address data structure
@@ -376,7 +376,8 @@ static int release(struct socket *sock)
* NOTE: This routine doesn't need to take the socket lock since it doesn't
* access any non-constant socket information.
*/
-static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
+static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
+ int uaddr_len)
{
struct sock *sk = sock->sk;
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
@@ -421,7 +422,7 @@ exit:
}
/**
- * get_name - get port ID of socket or peer socket
+ * tipc_getname - get port ID of socket or peer socket
* @sock: socket structure
* @uaddr: area for returned socket address
* @uaddr_len: area for returned length of socket address
@@ -433,8 +434,8 @@ exit:
* accesses socket information that is unchanging (or which changes in
* a completely predictable manner).
*/
-static int get_name(struct socket *sock, struct sockaddr *uaddr,
- int *uaddr_len, int peer)
+static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
+ int *uaddr_len, int peer)
{
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
struct tipc_sock *tsock = tipc_sk(sock->sk);
@@ -461,7 +462,7 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
}
/**
- * poll - read and possibly block on pollmask
+ * tipc_poll - read and possibly block on pollmask
* @file: file structure associated with the socket
* @sock: socket for which to calculate the poll bits
* @wait: ???
@@ -500,8 +501,8 @@ static int get_name(struct socket *sock, struct sockaddr *uaddr,
* imply that the operation will succeed, merely that it should be performed
* and will not block.
*/
-static unsigned int poll(struct file *file, struct socket *sock,
- poll_table *wait)
+static unsigned int tipc_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
u32 mask = 0;
@@ -588,7 +589,7 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
}
/**
- * send_msg - send message in connectionless manner
+ * tipc_sendmsg - send message in connectionless manner
* @iocb: if NULL, indicates that socket lock is already held
* @sock: socket structure
* @m: message to send
@@ -601,8 +602,8 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
*
* Returns the number of bytes sent on success, or errno otherwise
*/
-static int send_msg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
+static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -669,10 +670,10 @@ static int send_msg(struct kiocb *iocb, struct socket *sock,
res = dest_name_check(dest, m);
if (res)
break;
- res = tipc_multicast(tport->ref,
- &dest->addr.nameseq,
- m->msg_iov,
- total_len);
+ res = tipc_port_mcast_xmit(tport->ref,
+ &dest->addr.nameseq,
+ m->msg_iov,
+ total_len);
}
if (likely(res != -ELINKCONG)) {
if (needs_conn && (res >= 0))
@@ -719,7 +720,7 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
}
/**
- * send_packet - send a connection-oriented message
+ * tipc_send_packet - send a connection-oriented message
* @iocb: if NULL, indicates that socket lock is already held
* @sock: socket structure
* @m: message to send
@@ -729,8 +730,8 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
*
* Returns the number of bytes sent on success, or errno otherwise
*/
-static int send_packet(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
+static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -740,7 +741,7 @@ static int send_packet(struct kiocb *iocb, struct socket *sock,
/* Handle implied connection establishment */
if (unlikely(dest))
- return send_msg(iocb, sock, m, total_len);
+ return tipc_sendmsg(iocb, sock, m, total_len);
if (total_len > TIPC_MAX_USER_MSG_SIZE)
return -EMSGSIZE;
@@ -772,7 +773,7 @@ exit:
}
/**
- * send_stream - send stream-oriented data
+ * tipc_send_stream - send stream-oriented data
* @iocb: (unused)
* @sock: socket structure
* @m: data to send
@@ -783,8 +784,8 @@ exit:
* Returns the number of bytes sent on success (or partial success),
* or errno if no data sent
*/
-static int send_stream(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t total_len)
+static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t total_len)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -804,7 +805,7 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
/* Handle special cases where there is no connection */
if (unlikely(sock->state != SS_CONNECTED)) {
if (sock->state == SS_UNCONNECTED)
- res = send_packet(NULL, sock, m, total_len);
+ res = tipc_send_packet(NULL, sock, m, total_len);
else
res = sock->state == SS_DISCONNECTING ? -EPIPE : -ENOTCONN;
goto exit;
@@ -849,7 +850,8 @@ static int send_stream(struct kiocb *iocb, struct socket *sock,
bytes_to_send = curr_left;
my_iov.iov_base = curr_start;
my_iov.iov_len = bytes_to_send;
- res = send_packet(NULL, sock, &my_msg, bytes_to_send);
+ res = tipc_send_packet(NULL, sock, &my_msg,
+ bytes_to_send);
if (res < 0) {
if (bytes_sent)
res = bytes_sent;
@@ -886,7 +888,7 @@ static int auto_connect(struct socket *sock, struct tipc_msg *msg)
if (!p_ptr)
return -EINVAL;
- __tipc_connect(tsock->p->ref, p_ptr, &tsock->peer_name);
+ __tipc_port_connect(tsock->p->ref, p_ptr, &tsock->peer_name);
if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)
return -EINVAL;
@@ -1021,7 +1023,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
}
/**
- * recv_msg - receive packet-oriented message
+ * tipc_recvmsg - receive packet-oriented message
* @iocb: (unused)
* @m: descriptor for message info
* @buf_len: total size of user buffer area
@@ -1032,8 +1034,8 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
*
* Returns size of returned message data, errno otherwise
*/
-static int recv_msg(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t buf_len, int flags)
+static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t buf_len, int flags)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -1115,7 +1117,7 @@ exit:
}
/**
- * recv_stream - receive stream-oriented data
+ * tipc_recv_stream - receive stream-oriented data
* @iocb: (unused)
* @m: descriptor for message info
* @buf_len: total size of user buffer area
@@ -1126,8 +1128,8 @@ exit:
*
* Returns size of returned message data, errno otherwise
*/
-static int recv_stream(struct kiocb *iocb, struct socket *sock,
- struct msghdr *m, size_t buf_len, int flags)
+static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t buf_len, int flags)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -1289,7 +1291,7 @@ static u32 filter_connect(struct tipc_sock *tsock, struct sk_buff **buf)
if (msg_connected(msg) && tipc_port_peer_msg(tsock->p, msg)) {
if (unlikely(msg_errcode(msg))) {
sock->state = SS_DISCONNECTING;
- __tipc_disconnect(tsock->p);
+ __tipc_port_disconnect(tsock->p);
}
retval = TIPC_OK;
}
@@ -1504,7 +1506,7 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
}
/**
- * connect - establish a connection to another TIPC port
+ * tipc_connect - establish a connection to another TIPC port
* @sock: socket structure
* @dest: socket address for destination port
* @destlen: size of socket address data structure
@@ -1512,8 +1514,8 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
*
* Returns 0 on success, errno otherwise
*/
-static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
- int flags)
+static int tipc_connect(struct socket *sock, struct sockaddr *dest,
+ int destlen, int flags)
{
struct sock *sk = sock->sk;
struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
@@ -1554,7 +1556,7 @@ static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
if (!timeout)
m.msg_flags = MSG_DONTWAIT;
- res = send_msg(NULL, sock, &m, 0);
+ res = tipc_sendmsg(NULL, sock, &m, 0);
if ((res < 0) && (res != -EWOULDBLOCK))
goto exit;
@@ -1585,13 +1587,13 @@ exit:
}
/**
- * listen - allow socket to listen for incoming connections
+ * tipc_listen - allow socket to listen for incoming connections
* @sock: socket structure
* @len: (unused)
*
* Returns 0 on success, errno otherwise
*/
-static int listen(struct socket *sock, int len)
+static int tipc_listen(struct socket *sock, int len)
{
struct sock *sk = sock->sk;
int res;
@@ -1646,14 +1648,14 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
}
/**
- * accept - wait for connection request
+ * tipc_accept - wait for connection request
* @sock: listening socket
* @newsock: new socket that is to be connected
* @flags: file-related flags associated with socket
*
* Returns 0 on success, errno otherwise
*/
-static int accept(struct socket *sock, struct socket *new_sock, int flags)
+static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
{
struct sock *new_sk, *sk = sock->sk;
struct sk_buff *buf;
@@ -1700,7 +1702,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
/* Connect new socket to its peer */
new_tsock->peer_name.ref = msg_origport(msg);
new_tsock->peer_name.node = msg_orignode(msg);
- tipc_connect(new_ref, &new_tsock->peer_name);
+ tipc_port_connect(new_ref, &new_tsock->peer_name);
new_sock->state = SS_CONNECTED;
tipc_set_portimportance(new_ref, msg_importance(msg));
@@ -1717,7 +1719,7 @@ static int accept(struct socket *sock, struct socket *new_sock, int flags)
struct msghdr m = {NULL,};
advance_rx_queue(sk);
- send_packet(NULL, new_sock, &m, 0);
+ tipc_send_packet(NULL, new_sock, &m, 0);
} else {
__skb_dequeue(&sk->sk_receive_queue);
__skb_queue_head(&new_sk->sk_receive_queue, buf);
@@ -1731,7 +1733,7 @@ exit:
}
/**
- * shutdown - shutdown socket connection
+ * tipc_shutdown - shutdown socket connection
* @sock: socket structure
* @how: direction to close (must be SHUT_RDWR)
*
@@ -1739,7 +1741,7 @@ exit:
*
* Returns 0 on success, errno otherwise
*/
-static int shutdown(struct socket *sock, int how)
+static int tipc_shutdown(struct socket *sock, int how)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -1763,10 +1765,10 @@ restart:
kfree_skb(buf);
goto restart;
}
- tipc_disconnect(tport->ref);
+ tipc_port_disconnect(tport->ref);
tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
} else {
- tipc_shutdown(tport->ref);
+ tipc_port_shutdown(tport->ref);
}
sock->state = SS_DISCONNECTING;
@@ -1792,7 +1794,7 @@ restart:
}
/**
- * setsockopt - set socket option
+ * tipc_setsockopt - set socket option
* @sock: socket structure
* @lvl: option level
* @opt: option identifier
@@ -1804,8 +1806,8 @@ restart:
*
* Returns 0 on success, errno otherwise
*/
-static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
- unsigned int ol)
+static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
+ char __user *ov, unsigned int ol)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -1851,7 +1853,7 @@ static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
}
/**
- * getsockopt - get socket option
+ * tipc_getsockopt - get socket option
* @sock: socket structure
* @lvl: option level
* @opt: option identifier
@@ -1863,8 +1865,8 @@ static int setsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
*
* Returns 0 on success, errno otherwise
*/
-static int getsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
- int __user *ol)
+static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
+ char __user *ov, int __user *ol)
{
struct sock *sk = sock->sk;
struct tipc_port *tport = tipc_sk_port(sk);
@@ -1925,20 +1927,20 @@ static int getsockopt(struct socket *sock, int lvl, int opt, char __user *ov,
static const struct proto_ops msg_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
- .release = release,
- .bind = bind,
- .connect = connect,
+ .release = tipc_release,
+ .bind = tipc_bind,
+ .connect = tipc_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
- .getname = get_name,
- .poll = poll,
+ .getname = tipc_getname,
+ .poll = tipc_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
- .shutdown = shutdown,
- .setsockopt = setsockopt,
- .getsockopt = getsockopt,
- .sendmsg = send_msg,
- .recvmsg = recv_msg,
+ .shutdown = tipc_shutdown,
+ .setsockopt = tipc_setsockopt,
+ .getsockopt = tipc_getsockopt,
+ .sendmsg = tipc_sendmsg,
+ .recvmsg = tipc_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage
};
@@ -1946,20 +1948,20 @@ static const struct proto_ops msg_ops = {
static const struct proto_ops packet_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
- .release = release,
- .bind = bind,
- .connect = connect,
+ .release = tipc_release,
+ .bind = tipc_bind,
+ .connect = tipc_connect,
.socketpair = sock_no_socketpair,
- .accept = accept,
- .getname = get_name,
- .poll = poll,
+ .accept = tipc_accept,
+ .getname = tipc_getname,
+ .poll = tipc_poll,
.ioctl = sock_no_ioctl,
- .listen = listen,
- .shutdown = shutdown,
- .setsockopt = setsockopt,
- .getsockopt = getsockopt,
- .sendmsg = send_packet,
- .recvmsg = recv_msg,
+ .listen = tipc_listen,
+ .shutdown = tipc_shutdown,
+ .setsockopt = tipc_setsockopt,
+ .getsockopt = tipc_getsockopt,
+ .sendmsg = tipc_send_packet,
+ .recvmsg = tipc_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage
};
@@ -1967,20 +1969,20 @@ static const struct proto_ops packet_ops = {
static const struct proto_ops stream_ops = {
.owner = THIS_MODULE,
.family = AF_TIPC,
- .release = release,
- .bind = bind,
- .connect = connect,
+ .release = tipc_release,
+ .bind = tipc_bind,
+ .connect = tipc_connect,
.socketpair = sock_no_socketpair,
- .accept = accept,
- .getname = get_name,
- .poll = poll,
+ .accept = tipc_accept,
+ .getname = tipc_getname,
+ .poll = tipc_poll,
.ioctl = sock_no_ioctl,
- .listen = listen,
- .shutdown = shutdown,
- .setsockopt = setsockopt,
- .getsockopt = getsockopt,
- .sendmsg = send_stream,
- .recvmsg = recv_stream,
+ .listen = tipc_listen,
+ .shutdown = tipc_shutdown,
+ .setsockopt = tipc_setsockopt,
+ .getsockopt = tipc_getsockopt,
+ .sendmsg = tipc_send_stream,
+ .recvmsg = tipc_recv_stream,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage
};
diff --git a/net/wireless/ap.c b/net/wireless/ap.c
index 11ee4ed..68602be 100644
--- a/net/wireless/ap.c
+++ b/net/wireless/ap.c
@@ -27,9 +27,10 @@ static int __cfg80211_stop_ap(struct cfg80211_registered_device *rdev,
err = rdev_stop_ap(rdev, dev);
if (!err) {
wdev->beacon_interval = 0;
- wdev->channel = NULL;
+ memset(&wdev->chandef, 0, sizeof(wdev->chandef));
wdev->ssid_len = 0;
rdev_set_qos_map(rdev, dev, NULL);
+ nl80211_send_ap_stopped(wdev);
}
return err;
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 78559b5..2d4268c 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -642,7 +642,8 @@ int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
void
cfg80211_get_chan_state(struct wireless_dev *wdev,
struct ieee80211_channel **chan,
- enum cfg80211_chan_mode *chanmode)
+ enum cfg80211_chan_mode *chanmode,
+ u8 *radar_detect)
{
*chan = NULL;
*chanmode = CHAN_MODE_UNDEFINED;
@@ -660,6 +661,11 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
!wdev->ibss_dfs_possible)
? CHAN_MODE_SHARED
: CHAN_MODE_EXCLUSIVE;
+
+ /* consider worst-case - IBSS can try to return to the
+ * original user-specified channel as creator */
+ if (wdev->ibss_dfs_possible)
+ *radar_detect |= BIT(wdev->chandef.width);
return;
}
break;
@@ -674,17 +680,26 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
if (wdev->cac_started) {
- *chan = wdev->channel;
+ *chan = wdev->chandef.chan;
*chanmode = CHAN_MODE_SHARED;
+ *radar_detect |= BIT(wdev->chandef.width);
} else if (wdev->beacon_interval) {
- *chan = wdev->channel;
+ *chan = wdev->chandef.chan;
*chanmode = CHAN_MODE_SHARED;
+
+ if (cfg80211_chandef_dfs_required(wdev->wiphy,
+ &wdev->chandef))
+ *radar_detect |= BIT(wdev->chandef.width);
}
return;
case NL80211_IFTYPE_MESH_POINT:
if (wdev->mesh_id_len) {
- *chan = wdev->channel;
+ *chan = wdev->chandef.chan;
*chanmode = CHAN_MODE_SHARED;
+
+ if (cfg80211_chandef_dfs_required(wdev->wiphy,
+ &wdev->chandef))
+ *radar_detect |= BIT(wdev->chandef.width);
}
return;
case NL80211_IFTYPE_MONITOR:
@@ -701,6 +716,4 @@ cfg80211_get_chan_state(struct wireless_dev *wdev,
case NUM_NL80211_IFTYPES:
WARN_ON(1);
}
-
- return;
}
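
The chan.c hunk above extends cfg80211_get_chan_state() so that, besides the channel and mode, it fills a bitmap of channel widths for which radar detection is needed, OR-ing in BIT(wdev->chandef.width) for IBSS/AP/mesh interfaces that require it. The following is a compilable userspace sketch of that accumulation pattern only, using a made-up fake_wdev struct and stand-in width values instead of the cfg80211 types:

#include <stdio.h>
#include <stdint.h>

/* Stand-in width values; not the kernel's enum nl80211_chan_width. */
enum chan_width { WIDTH_20 = 1, WIDTH_40 = 2, WIDTH_80 = 3 };

struct fake_wdev {
	enum chan_width width;
	int needs_radar_detect;	/* e.g. DFS required, or IBSS creator case */
};

static void get_chan_state(const struct fake_wdev *w, uint8_t *radar_detect)
{
	if (w->needs_radar_detect)
		*radar_detect |= 1u << w->width;	/* mirrors BIT(chandef.width) */
}

int main(void)
{
	struct fake_wdev devs[] = {
		{ WIDTH_20, 0 }, { WIDTH_40, 1 }, { WIDTH_80, 1 },
	};
	uint8_t radar_detect = 0;

	for (unsigned i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
		get_chan_state(&devs[i], &radar_detect);

	printf("radar_detect width bitmap: 0x%02x\n", radar_detect);
	return 0;
}

The caller (see the util.c hunk further down) then has one bitmap summarizing every interface that may need radar detection.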
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 010892b..76ae6a6 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -737,7 +737,7 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)
}
EXPORT_SYMBOL(cfg80211_unregister_wdev);
-static struct device_type wiphy_type = {
+static const struct device_type wiphy_type = {
.name = "wlan",
};
diff --git a/net/wireless/core.h b/net/wireless/core.h
index f1d193b..4068300 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -211,6 +211,7 @@ struct cfg80211_event {
} dc;
struct {
u8 bssid[ETH_ALEN];
+ struct ieee80211_channel *channel;
} ij;
};
};
@@ -258,7 +259,8 @@ int __cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool nowext);
int cfg80211_leave_ibss(struct cfg80211_registered_device *rdev,
struct net_device *dev, bool nowext);
-void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid);
+void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+ struct ieee80211_channel *channel);
int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
@@ -443,7 +445,8 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
void
cfg80211_get_chan_state(struct wireless_dev *wdev,
struct ieee80211_channel **chan,
- enum cfg80211_chan_mode *chanmode);
+ enum cfg80211_chan_mode *chanmode,
+ u8 *radar_detect);
int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
struct cfg80211_chan_def *chandef);
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index f911c5f9f..1470b90 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -14,7 +14,8 @@
#include "rdev-ops.h"
-void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
+void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+ struct ieee80211_channel *channel)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_bss *bss;
@@ -28,8 +29,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
if (!wdev->ssid_len)
return;
- bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
- wdev->ssid, wdev->ssid_len,
+ bss = cfg80211_get_bss(wdev->wiphy, channel, bssid, NULL, 0,
WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS);
if (WARN_ON(!bss))
@@ -54,21 +54,26 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
#endif
}
-void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
+void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid,
+ struct ieee80211_channel *channel, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_dev(wdev->wiphy);
struct cfg80211_event *ev;
unsigned long flags;
- trace_cfg80211_ibss_joined(dev, bssid);
+ trace_cfg80211_ibss_joined(dev, bssid, channel);
+
+ if (WARN_ON(!channel))
+ return;
ev = kzalloc(sizeof(*ev), gfp);
if (!ev)
return;
ev->type = EVENT_IBSS_JOINED;
- memcpy(ev->cr.bssid, bssid, ETH_ALEN);
+ memcpy(ev->ij.bssid, bssid, ETH_ALEN);
+ ev->ij.channel = channel;
spin_lock_irqsave(&wdev->event_lock, flags);
list_add_tail(&ev->list, &wdev->event_list);
@@ -117,6 +122,7 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
wdev->ibss_fixed = params->channel_fixed;
wdev->ibss_dfs_possible = params->userspace_handles_dfs;
+ wdev->chandef = params->chandef;
#ifdef CONFIG_CFG80211_WEXT
wdev->wext.ibss.chandef = params->chandef;
#endif
@@ -200,6 +206,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
wdev->current_bss = NULL;
wdev->ssid_len = 0;
+ memset(&wdev->chandef, 0, sizeof(wdev->chandef));
#ifdef CONFIG_CFG80211_WEXT
if (!nowext)
wdev->wext.ibss.ssid_len = 0;
diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c
index 8858624..d42a3fc 100644
--- a/net/wireless/mesh.c
+++ b/net/wireless/mesh.c
@@ -195,7 +195,7 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev,
if (!err) {
memcpy(wdev->ssid, setup->mesh_id, setup->mesh_id_len);
wdev->mesh_id_len = setup->mesh_id_len;
- wdev->channel = setup->chandef.chan;
+ wdev->chandef = setup->chandef;
}
return err;
@@ -244,7 +244,7 @@ int cfg80211_set_mesh_channel(struct cfg80211_registered_device *rdev,
err = rdev_libertas_set_mesh_channel(rdev, wdev->netdev,
chandef->chan);
if (!err)
- wdev->channel = chandef->chan;
+ wdev->chandef = *chandef;
return err;
}
@@ -276,7 +276,7 @@ static int __cfg80211_leave_mesh(struct cfg80211_registered_device *rdev,
err = rdev_leave_mesh(rdev, dev);
if (!err) {
wdev->mesh_id_len = 0;
- wdev->channel = NULL;
+ memset(&wdev->chandef, 0, sizeof(wdev->chandef));
rdev_set_qos_map(rdev, dev, NULL);
}
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 52cca05..d47c9d1 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -772,7 +772,7 @@ void cfg80211_cac_event(struct net_device *netdev,
if (WARN_ON(!wdev->cac_started))
return;
- if (WARN_ON(!wdev->channel))
+ if (WARN_ON(!wdev->chandef.chan))
return;
switch (event) {
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 4fe2e6e..8e6b6a2 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -382,6 +382,8 @@ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = {
[NL80211_ATTR_VENDOR_DATA] = { .type = NLA_BINARY },
[NL80211_ATTR_QOS_MAP] = { .type = NLA_BINARY,
.len = IEEE80211_QOS_MAP_LEN_MAX },
+ [NL80211_ATTR_MAC_HINT] = { .len = ETH_ALEN },
+ [NL80211_ATTR_WIPHY_FREQ_HINT] = { .type = NLA_U32 },
};
/* policy for the key attributes */
@@ -855,6 +857,19 @@ static int nl80211_key_allowed(struct wireless_dev *wdev)
return 0;
}
+static struct ieee80211_channel *nl80211_get_valid_chan(struct wiphy *wiphy,
+ struct nlattr *tb)
+{
+ struct ieee80211_channel *chan;
+
+ if (tb == NULL)
+ return NULL;
+ chan = ieee80211_get_channel(wiphy, nla_get_u32(tb));
+ if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
+ return NULL;
+ return chan;
+}
+
static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes)
{
struct nlattr *nl_modes = nla_nest_start(msg, attr);
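
nl80211_get_valid_chan() above folds the repeated "look up the frequency attribute and reject disabled channels" check into one helper that returns NULL on any failure, so the authenticate/associate/connect call sites later in this file shrink to a single NULL test. A toy version of the same validate-or-NULL pattern over a static channel table (chans, CHAN_DISABLED and get_valid_chan are invented names, not the nl80211 API):

#include <stdio.h>
#include <stddef.h>

#define CHAN_DISABLED 0x1

struct channel { unsigned freq_mhz; unsigned flags; };

static struct channel chans[] = {
	{ 2412, 0 }, { 2467, CHAN_DISABLED }, { 5180, 0 },
};

/* return a usable channel for freq, or NULL if unknown or disabled */
static struct channel *get_valid_chan(unsigned freq_mhz)
{
	for (size_t i = 0; i < sizeof(chans) / sizeof(chans[0]); i++) {
		if (chans[i].freq_mhz != freq_mhz)
			continue;
		if (chans[i].flags & CHAN_DISABLED)
			return NULL;
		return &chans[i];
	}
	return NULL;
}

int main(void)
{
	printf("2412: %s\n", get_valid_chan(2412) ? "ok" : "invalid");
	printf("2467: %s\n", get_valid_chan(2467) ? "ok" : "invalid");
	return 0;
}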
@@ -1586,6 +1601,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *dev,
(nla_put_flag(msg, NL80211_ATTR_SUPPORT_5_MHZ) ||
nla_put_flag(msg, NL80211_ATTR_SUPPORT_10_MHZ)))
goto nla_put_failure;
+
+ if (dev->wiphy.max_ap_assoc_sta &&
+ nla_put_u32(msg, NL80211_ATTR_MAX_AP_ASSOC_STA,
+ dev->wiphy.max_ap_assoc_sta))
+ goto nla_put_failure;
+
state->split_start++;
break;
case 11:
@@ -2035,10 +2056,12 @@ static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info)
nla_for_each_nested(nl_txq_params,
info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS],
rem_txq_params) {
- nla_parse(tb, NL80211_TXQ_ATTR_MAX,
- nla_data(nl_txq_params),
- nla_len(nl_txq_params),
- txq_params_policy);
+ result = nla_parse(tb, NL80211_TXQ_ATTR_MAX,
+ nla_data(nl_txq_params),
+ nla_len(nl_txq_params),
+ txq_params_policy);
+ if (result)
+ return result;
result = parse_txq_params(tb, &txq_params);
if (result)
return result;
@@ -3259,7 +3282,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
if (!err) {
wdev->preset_chandef = params.chandef;
wdev->beacon_interval = params.beacon_interval;
- wdev->channel = params.chandef.chan;
+ wdev->chandef = params.chandef;
wdev->ssid_len = params.ssid_len;
memcpy(wdev->ssid, params.ssid, wdev->ssid_len);
}
@@ -3902,8 +3925,8 @@ static struct net_device *get_vlan(struct genl_info *info,
return ERR_PTR(ret);
}
-static struct nla_policy
-nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] __read_mostly = {
+static const struct nla_policy
+nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] = {
[NL80211_STA_WME_UAPSD_QUEUES] = { .type = NLA_U8 },
[NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 },
};
@@ -4604,8 +4627,6 @@ static int parse_reg_rule(struct nlattr *tb[],
return -EINVAL;
if (!tb[NL80211_ATTR_FREQ_RANGE_END])
return -EINVAL;
- if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
- return -EINVAL;
if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
return -EINVAL;
@@ -4615,8 +4636,9 @@ static int parse_reg_rule(struct nlattr *tb[],
nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
freq_range->end_freq_khz =
nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
- freq_range->max_bandwidth_khz =
- nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
+ if (tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
+ freq_range->max_bandwidth_khz =
+ nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);
power_rule->max_eirp =
nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);
@@ -5086,6 +5108,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
const struct ieee80211_reg_rule *reg_rule;
const struct ieee80211_freq_range *freq_range;
const struct ieee80211_power_rule *power_rule;
+ unsigned int max_bandwidth_khz;
reg_rule = &regdom->reg_rules[i];
freq_range = &reg_rule->freq_range;
@@ -5095,6 +5118,11 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
if (!nl_reg_rule)
goto nla_put_failure_rcu;
+ max_bandwidth_khz = freq_range->max_bandwidth_khz;
+ if (!max_bandwidth_khz)
+ max_bandwidth_khz = reg_get_max_bandwidth(regdom,
+ reg_rule);
+
if (nla_put_u32(msg, NL80211_ATTR_REG_RULE_FLAGS,
reg_rule->flags) ||
nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_START,
@@ -5102,7 +5130,7 @@ static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_END,
freq_range->end_freq_khz) ||
nla_put_u32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW,
- freq_range->max_bandwidth_khz) ||
+ max_bandwidth_khz) ||
nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN,
power_rule->max_antenna_gain) ||
nla_put_u32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP,
@@ -5178,9 +5206,11 @@ static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info)
nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES],
rem_reg_rules) {
- nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
- nla_data(nl_reg_rule), nla_len(nl_reg_rule),
- reg_rule_policy);
+ r = nla_parse(tb, NL80211_REG_RULE_ATTR_MAX,
+ nla_data(nl_reg_rule), nla_len(nl_reg_rule),
+ reg_rule_policy);
+ if (r)
+ goto bad_reg;
r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]);
if (r)
goto bad_reg;
@@ -5443,6 +5473,7 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
enum ieee80211_band band;
size_t ie_len;
struct nlattr *tb[NL80211_SCHED_SCAN_MATCH_ATTR_MAX + 1];
+ s32 default_match_rssi = NL80211_SCAN_RSSI_THOLD_OFF;
if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
!rdev->ops->sched_scan_start)
@@ -5477,11 +5508,40 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
if (n_ssids > wiphy->max_sched_scan_ssids)
return -EINVAL;
- if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH])
+ /*
+ * First, count the number of 'real' matchsets. Due to an issue with
+ * the old implementation, matchsets containing only the RSSI attribute
+ * (NL80211_SCHED_SCAN_MATCH_ATTR_RSSI) are considered as the 'default'
+ * RSSI for all matchsets, rather than their own matchset for reporting
+ * all APs with a strong RSSI. This is needed to stay compatible with

+ * older userspace that treated a matchset with only the RSSI as the
+ * global RSSI for all other matchsets - if there are other matchsets.
+ */
+ if (info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH]) {
nla_for_each_nested(attr,
info->attrs[NL80211_ATTR_SCHED_SCAN_MATCH],
- tmp)
- n_match_sets++;
+ tmp) {
+ struct nlattr *rssi;
+
+ err = nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
+ nla_data(attr), nla_len(attr),
+ nl80211_match_policy);
+ if (err)
+ return err;
+ /* add other standalone attributes here */
+ if (tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID]) {
+ n_match_sets++;
+ continue;
+ }
+ rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
+ if (rssi)
+ default_match_rssi = nla_get_s32(rssi);
+ }
+ }
+
+ /* However, if there's no other matchset, add the RSSI one */
+ if (!n_match_sets && default_match_rssi != NL80211_SCAN_RSSI_THOLD_OFF)
+ n_match_sets = 1;
if (n_match_sets > wiphy->max_match_sets)
return -EINVAL;
@@ -5602,11 +5662,22 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
tmp) {
struct nlattr *ssid, *rssi;
- nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
- nla_data(attr), nla_len(attr),
- nl80211_match_policy);
+ err = nla_parse(tb, NL80211_SCHED_SCAN_MATCH_ATTR_MAX,
+ nla_data(attr), nla_len(attr),
+ nl80211_match_policy);
+ if (err)
+ goto out_free;
ssid = tb[NL80211_SCHED_SCAN_MATCH_ATTR_SSID];
if (ssid) {
+ if (WARN_ON(i >= n_match_sets)) {
+ /* this indicates a programming error,
+ * the loop above should have verified
+ * things properly
+ */
+ err = -EINVAL;
+ goto out_free;
+ }
+
if (nla_len(ssid) > IEEE80211_MAX_SSID_LEN) {
err = -EINVAL;
goto out_free;
@@ -5615,15 +5686,28 @@ static int nl80211_start_sched_scan(struct sk_buff *skb,
nla_data(ssid), nla_len(ssid));
request->match_sets[i].ssid.ssid_len =
nla_len(ssid);
+ /* special attribute - old implementation workaround */
+ request->match_sets[i].rssi_thold =
+ default_match_rssi;
+ rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
+ if (rssi)
+ request->match_sets[i].rssi_thold =
+ nla_get_s32(rssi);
}
- rssi = tb[NL80211_SCHED_SCAN_MATCH_ATTR_RSSI];
- if (rssi)
- request->rssi_thold = nla_get_u32(rssi);
- else
- request->rssi_thold =
- NL80211_SCAN_RSSI_THOLD_OFF;
i++;
}
+
+ /* there was no other matchset, so the RSSI one is alone */
+ if (i == 0)
+ request->match_sets[0].rssi_thold = default_match_rssi;
+
+ request->min_rssi_thold = INT_MAX;
+ for (i = 0; i < n_match_sets; i++)
+ request->min_rssi_thold =
+ min(request->match_sets[i].rssi_thold,
+ request->min_rssi_thold);
+ } else {
+ request->min_rssi_thold = NL80211_SCAN_RSSI_THOLD_OFF;
}
if (info->attrs[NL80211_ATTR_IE]) {
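
The two sched-scan hunks above implement a compatibility rule: a matchset carrying only an RSSI value becomes a default threshold applied to every SSID matchset, stands alone only when no SSID matchset was given at all, and the request additionally records the minimum threshold across all matchsets. A self-contained sketch of that counting and defaulting logic, with simplified match_in/match_set structs standing in for the parsed netlink attributes (not the cfg80211 structures):

#include <stdio.h>
#include <limits.h>

#define RSSI_THOLD_OFF (-300)	/* stand-in for NL80211_SCAN_RSSI_THOLD_OFF */

struct match_in  { const char *ssid; int has_rssi; int rssi; };
struct match_set { const char *ssid; int rssi_thold; };

int main(void)
{
	struct match_in in[] = {
		{ "home-ap", 0,   0 },	/* SSID only: inherits the default RSSI */
		{ NULL,      1, -70 },	/* RSSI only: becomes the default */
		{ "guest",   1, -60 },	/* SSID with its own RSSI */
	};
	struct match_set out[3];
	int default_rssi = RSSI_THOLD_OFF, min_rssi = INT_MAX;
	int n_out = 0, i;

	/* pass 1: RSSI-only matchsets just set the default threshold */
	for (i = 0; i < 3; i++)
		if (!in[i].ssid && in[i].has_rssi)
			default_rssi = in[i].rssi;

	/* pass 2: 'real' (SSID) matchsets; a per-set RSSI overrides the default */
	for (i = 0; i < 3; i++) {
		if (!in[i].ssid)
			continue;
		out[n_out].ssid = in[i].ssid;
		out[n_out].rssi_thold = in[i].has_rssi ? in[i].rssi : default_rssi;
		n_out++;
	}

	/* an RSSI-only matchset stands alone only if no SSID matchset exists */
	if (!n_out && default_rssi != RSSI_THOLD_OFF) {
		out[0].ssid = "(any)";	/* label only; the kernel leaves the SSID empty */
		out[0].rssi_thold = default_rssi;
		n_out = 1;
	}

	for (i = 0; i < n_out; i++) {
		if (out[i].rssi_thold < min_rssi)
			min_rssi = out[i].rssi_thold;
		printf("match '%s': rssi_thold %d\n", out[i].ssid, out[i].rssi_thold);
	}
	printf("min_rssi_thold = %d\n", min_rssi);
	return 0;
}

For the sample input this prints thresholds of -70 and -60 and a minimum of -70, matching the behaviour the comment in the hunk describes.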
@@ -5719,7 +5803,7 @@ static int nl80211_start_radar_detection(struct sk_buff *skb,
err = rdev->ops->start_radar_detection(&rdev->wiphy, dev, &chandef);
if (!err) {
- wdev->channel = chandef.chan;
+ wdev->chandef = chandef;
wdev->cac_started = true;
wdev->cac_start_time = jiffies;
}
@@ -5751,10 +5835,15 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
/* useless if AP is not running */
if (!wdev->beacon_interval)
- return -EINVAL;
+ return -ENOTCONN;
break;
case NL80211_IFTYPE_ADHOC:
+ if (!wdev->ssid_len)
+ return -ENOTCONN;
+ break;
case NL80211_IFTYPE_MESH_POINT:
+ if (!wdev->mesh_id_len)
+ return -ENOTCONN;
break;
default:
return -EOPNOTSUPP;
@@ -6192,9 +6281,9 @@ static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
return -EOPNOTSUPP;
bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
- chan = ieee80211_get_channel(&rdev->wiphy,
- nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
- if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED))
+ chan = nl80211_get_valid_chan(&rdev->wiphy,
+ info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+ if (!chan)
return -EINVAL;
ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
@@ -6347,9 +6436,9 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
- chan = ieee80211_get_channel(&rdev->wiphy,
- nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
- if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED))
+ chan = nl80211_get_valid_chan(&rdev->wiphy,
+ info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+ if (!chan)
return -EINVAL;
ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
@@ -6985,6 +7074,9 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
if (info->attrs[NL80211_ATTR_MAC])
connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
+ else if (info->attrs[NL80211_ATTR_MAC_HINT])
+ connect.bssid_hint =
+ nla_data(info->attrs[NL80211_ATTR_MAC_HINT]);
connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
connect.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);
@@ -7003,11 +7095,14 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
}
if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
- connect.channel =
- ieee80211_get_channel(wiphy,
- nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
- if (!connect.channel ||
- connect.channel->flags & IEEE80211_CHAN_DISABLED)
+ connect.channel = nl80211_get_valid_chan(
+ wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ]);
+ if (!connect.channel)
+ return -EINVAL;
+ } else if (info->attrs[NL80211_ATTR_WIPHY_FREQ_HINT]) {
+ connect.channel_hint = nl80211_get_valid_chan(
+ wiphy, info->attrs[NL80211_ATTR_WIPHY_FREQ_HINT]);
+ if (!connect.channel_hint)
return -EINVAL;
}
@@ -7421,6 +7516,7 @@ static const struct nla_policy nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
[NL80211_TXRATE_HT] = { .type = NLA_BINARY,
.len = NL80211_MAX_SUPP_HT_RATES },
[NL80211_TXRATE_VHT] = { .len = sizeof(struct nl80211_txrate_vht)},
+ [NL80211_TXRATE_GI] = { .type = NLA_U8 },
};
static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
@@ -7467,16 +7563,19 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
* directly to the enum ieee80211_band values used in cfg80211.
*/
BUILD_BUG_ON(NL80211_MAX_SUPP_HT_RATES > IEEE80211_HT_MCS_MASK_LEN * 8);
- nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem)
- {
+ nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) {
enum ieee80211_band band = nla_type(tx_rates);
+ int err;
+
if (band < 0 || band >= IEEE80211_NUM_BANDS)
return -EINVAL;
sband = rdev->wiphy.bands[band];
if (sband == NULL)
return -EINVAL;
- nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
- nla_len(tx_rates), nl80211_txattr_policy);
+ err = nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates),
+ nla_len(tx_rates), nl80211_txattr_policy);
+ if (err)
+ return err;
if (tb[NL80211_TXRATE_LEGACY]) {
mask.control[band].legacy = rateset_to_mask(
sband,
@@ -7501,6 +7600,12 @@ static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
mask.control[band].vht_mcs))
return -EINVAL;
}
+ if (tb[NL80211_TXRATE_GI]) {
+ mask.control[band].gi =
+ nla_get_u8(tb[NL80211_TXRATE_GI]);
+ if (mask.control[band].gi > NL80211_TXRATE_FORCE_LGI)
+ return -EINVAL;
+ }
if (mask.control[band].legacy == 0) {
/* don't allow empty legacy rates if HT or VHT
@@ -7777,8 +7882,8 @@ static int nl80211_get_power_save(struct sk_buff *skb, struct genl_info *info)
return err;
}
-static struct nla_policy
-nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = {
+static const struct nla_policy
+nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] = {
[NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 },
[NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 },
[NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 },
@@ -11107,7 +11212,8 @@ void cfg80211_ch_switch_notify(struct net_device *dev,
wdev->iftype != NL80211_IFTYPE_MESH_POINT))
return;
- wdev->channel = chandef->chan;
+ wdev->chandef = *chandef;
+ wdev->preset_chandef = *chandef;
nl80211_ch_switch_notify(rdev, dev, chandef, GFP_KERNEL);
}
EXPORT_SYMBOL(cfg80211_ch_switch_notify);
@@ -11621,6 +11727,35 @@ void cfg80211_crit_proto_stopped(struct wireless_dev *wdev, gfp_t gfp)
}
EXPORT_SYMBOL(cfg80211_crit_proto_stopped);
+void nl80211_send_ap_stopped(struct wireless_dev *wdev)
+{
+ struct wiphy *wiphy = wdev->wiphy;
+ struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);
+ struct sk_buff *msg;
+ void *hdr;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!msg)
+ return;
+
+ hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_STOP_AP);
+ if (!hdr)
+ goto out;
+
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex) ||
+ nla_put_u64(msg, NL80211_ATTR_WDEV, wdev_id(wdev)))
+ goto out;
+
+ genlmsg_end(msg, hdr);
+
+ genlmsg_multicast_netns(&nl80211_fam, wiphy_net(wiphy), msg, 0,
+ NL80211_MCGRP_MLME, GFP_KERNEL);
+ return;
+ out:
+ nlmsg_free(msg);
+}
+
/* initialisation/exit functions */
int nl80211_init(void)
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 7579974..1e6df96 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -74,6 +74,8 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev,
enum nl80211_radar_event event,
struct net_device *netdev, gfp_t gfp);
+void nl80211_send_ap_stopped(struct wireless_dev *wdev);
+
void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev);
#endif /* __NET_WIRELESS_NL80211_H */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index f054137..24c257c 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -91,7 +91,7 @@ static struct regulatory_request __rcu *last_request =
/* To trigger userspace events */
static struct platform_device *reg_pdev;
-static struct device_type reg_device_type = {
+static const struct device_type reg_device_type = {
.uevent = reg_device_uevent,
};
@@ -522,6 +522,77 @@ bool reg_is_valid_request(const char *alpha2)
return alpha2_equal(lr->alpha2, alpha2);
}
+static const struct ieee80211_regdomain *reg_get_regdomain(struct wiphy *wiphy)
+{
+ struct regulatory_request *lr = get_last_request();
+
+ /*
+ * Follow the driver's regulatory domain, if present, unless a country
+ * IE has been processed or a user wants to help complaince further
+ */
+ if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
+ lr->initiator != NL80211_REGDOM_SET_BY_USER &&
+ wiphy->regd)
+ return get_wiphy_regdom(wiphy);
+
+ return get_cfg80211_regdom();
+}
+
+unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd,
+ const struct ieee80211_reg_rule *rule)
+{
+ const struct ieee80211_freq_range *freq_range = &rule->freq_range;
+ const struct ieee80211_freq_range *freq_range_tmp;
+ const struct ieee80211_reg_rule *tmp;
+ u32 start_freq, end_freq, idx, no;
+
+ for (idx = 0; idx < rd->n_reg_rules; idx++)
+ if (rule == &rd->reg_rules[idx])
+ break;
+
+ if (idx == rd->n_reg_rules)
+ return 0;
+
+ /* get start_freq */
+ no = idx;
+
+ while (no) {
+ tmp = &rd->reg_rules[--no];
+ freq_range_tmp = &tmp->freq_range;
+
+ if (freq_range_tmp->end_freq_khz < freq_range->start_freq_khz)
+ break;
+
+ if (freq_range_tmp->max_bandwidth_khz)
+ break;
+
+ freq_range = freq_range_tmp;
+ }
+
+ start_freq = freq_range->start_freq_khz;
+
+ /* get end_freq */
+ freq_range = &rule->freq_range;
+ no = idx;
+
+ while (no < rd->n_reg_rules - 1) {
+ tmp = &rd->reg_rules[++no];
+ freq_range_tmp = &tmp->freq_range;
+
+ if (freq_range_tmp->start_freq_khz > freq_range->end_freq_khz)
+ break;
+
+ if (freq_range_tmp->max_bandwidth_khz)
+ break;
+
+ freq_range = freq_range_tmp;
+ }
+
+ end_freq = freq_range->end_freq_khz;
+
+ return end_freq - start_freq;
+}
+
/* Sanity check on a regulatory rule */
static bool is_valid_reg_rule(const struct ieee80211_reg_rule *rule)
{
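
reg_get_max_bandwidth() above computes an "auto" bandwidth for a rule by extending it across neighbouring rules whose frequency ranges touch it and that also request auto calculation (max_bandwidth_khz == 0), then returning the extended end frequency minus the extended start frequency. A compilable sketch of the same walk over a plain array of ranges rather than the regdomain structs; the struct and function names here are invented:

#include <stdio.h>

struct range { unsigned start_khz, end_khz, max_bw_khz; /* 0 = auto */ };

/* widen rule idx across contiguous neighbours that are also 'auto' */
static unsigned auto_max_bw(const struct range *r, unsigned n, unsigned idx)
{
	unsigned lo = idx, hi = idx;

	while (lo && r[lo - 1].end_khz >= r[lo].start_khz &&
	       !r[lo - 1].max_bw_khz)
		lo--;
	while (hi + 1 < n && r[hi + 1].start_khz <= r[hi].end_khz &&
	       !r[hi + 1].max_bw_khz)
		hi++;

	return r[hi].end_khz - r[lo].start_khz;
}

int main(void)
{
	/* three contiguous 20 MHz auto rules form one 60 MHz wide block */
	struct range rules[] = {
		{ 5170000, 5190000, 0 },
		{ 5190000, 5210000, 0 },
		{ 5210000, 5230000, 0 },
	};

	printf("auto bandwidth for rule 1: %u kHz\n",
	       auto_max_bw(rules, 3, 1));	/* prints 60000 */
	return 0;
}

The intersection hunk further down uses the same helper when one of the two rules being intersected has an explicit bandwidth and the other is set to auto.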
@@ -630,7 +701,9 @@ reg_intersect_dfs_region(const enum nl80211_dfs_regions dfs_region1,
* Helper for regdom_intersect(), this does the real
* mathematical intersection fun
*/
-static int reg_rules_intersect(const struct ieee80211_reg_rule *rule1,
+static int reg_rules_intersect(const struct ieee80211_regdomain *rd1,
+ const struct ieee80211_regdomain *rd2,
+ const struct ieee80211_reg_rule *rule1,
const struct ieee80211_reg_rule *rule2,
struct ieee80211_reg_rule *intersected_rule)
{
@@ -638,7 +711,7 @@ static int reg_rules_intersect(const struct ieee80211_reg_rule *rule1,
struct ieee80211_freq_range *freq_range;
const struct ieee80211_power_rule *power_rule1, *power_rule2;
struct ieee80211_power_rule *power_rule;
- u32 freq_diff;
+ u32 freq_diff, max_bandwidth1, max_bandwidth2;
freq_range1 = &rule1->freq_range;
freq_range2 = &rule2->freq_range;
@@ -652,8 +725,24 @@ static int reg_rules_intersect(const struct ieee80211_reg_rule *rule1,
freq_range2->start_freq_khz);
freq_range->end_freq_khz = min(freq_range1->end_freq_khz,
freq_range2->end_freq_khz);
- freq_range->max_bandwidth_khz = min(freq_range1->max_bandwidth_khz,
- freq_range2->max_bandwidth_khz);
+
+ max_bandwidth1 = freq_range1->max_bandwidth_khz;
+ max_bandwidth2 = freq_range2->max_bandwidth_khz;
+
+ /*
+ * If both max_bandwidth1 and max_bandwidth2 are 0, set the output
+ * bandwidth to 0 (auto calculation); it is calculated correctly
+ * later in the handle_channel function.
+ * Otherwise calculate the output bandwidth here.
+ */
+ if (max_bandwidth1 || max_bandwidth2) {
+ if (!max_bandwidth1)
+ max_bandwidth1 = reg_get_max_bandwidth(rd1, rule1);
+ if (!max_bandwidth2)
+ max_bandwidth2 = reg_get_max_bandwidth(rd2, rule2);
+ }
+
+ freq_range->max_bandwidth_khz = min(max_bandwidth1, max_bandwidth2);
freq_diff = freq_range->end_freq_khz - freq_range->start_freq_khz;
if (freq_range->max_bandwidth_khz > freq_diff)
@@ -713,7 +802,8 @@ regdom_intersect(const struct ieee80211_regdomain *rd1,
rule1 = &rd1->reg_rules[x];
for (y = 0; y < rd2->n_reg_rules; y++) {
rule2 = &rd2->reg_rules[y];
- if (!reg_rules_intersect(rule1, rule2, &dummy_rule))
+ if (!reg_rules_intersect(rd1, rd2, rule1, rule2,
+ &dummy_rule))
num_rules++;
}
}
@@ -738,7 +828,8 @@ regdom_intersect(const struct ieee80211_regdomain *rd1,
* a memcpy()
*/
intersected_rule = &rd->reg_rules[rule_idx];
- r = reg_rules_intersect(rule1, rule2, intersected_rule);
+ r = reg_rules_intersect(rd1, rd2, rule1, rule2,
+ intersected_rule);
/*
* No need to memset here the intersected rule here as
* we're not using the stack anymore
@@ -821,18 +912,8 @@ const struct ieee80211_reg_rule *freq_reg_info(struct wiphy *wiphy,
u32 center_freq)
{
const struct ieee80211_regdomain *regd;
- struct regulatory_request *lr = get_last_request();
- /*
- * Follow the driver's regulatory domain, if present, unless a country
- * IE has been processed or a user wants to help complaince further
- */
- if (lr->initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE &&
- lr->initiator != NL80211_REGDOM_SET_BY_USER &&
- wiphy->regd)
- regd = get_wiphy_regdom(wiphy);
- else
- regd = get_cfg80211_regdom();
+ regd = reg_get_regdomain(wiphy);
return freq_reg_info_regd(wiphy, center_freq, regd);
}
@@ -903,6 +984,8 @@ static void handle_channel(struct wiphy *wiphy,
const struct ieee80211_freq_range *freq_range = NULL;
struct wiphy *request_wiphy = NULL;
struct regulatory_request *lr = get_last_request();
+ const struct ieee80211_regdomain *regd;
+ u32 max_bandwidth_khz;
request_wiphy = wiphy_idx_to_wiphy(lr->wiphy_idx);
@@ -944,11 +1027,18 @@ static void handle_channel(struct wiphy *wiphy,
power_rule = &reg_rule->power_rule;
freq_range = &reg_rule->freq_range;
- if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
+ max_bandwidth_khz = freq_range->max_bandwidth_khz;
+ /* Check if auto calculation requested */
+ if (!max_bandwidth_khz) {
+ regd = reg_get_regdomain(wiphy);
+ max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
+ }
+
+ if (max_bandwidth_khz < MHZ_TO_KHZ(40))
bw_flags = IEEE80211_CHAN_NO_HT40;
- if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(80))
+ if (max_bandwidth_khz < MHZ_TO_KHZ(80))
bw_flags |= IEEE80211_CHAN_NO_80MHZ;
- if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(160))
+ if (max_bandwidth_khz < MHZ_TO_KHZ(160))
bw_flags |= IEEE80211_CHAN_NO_160MHZ;
if (lr->initiator == NL80211_REGDOM_SET_BY_DRIVER &&
@@ -1334,6 +1424,7 @@ static void handle_channel_custom(struct wiphy *wiphy,
const struct ieee80211_reg_rule *reg_rule = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
const struct ieee80211_freq_range *freq_range = NULL;
+ u32 max_bandwidth_khz;
reg_rule = freq_reg_info_regd(wiphy, MHZ_TO_KHZ(chan->center_freq),
regd);
@@ -1351,11 +1442,16 @@ static void handle_channel_custom(struct wiphy *wiphy,
power_rule = &reg_rule->power_rule;
freq_range = &reg_rule->freq_range;
- if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(40))
+ max_bandwidth_khz = freq_range->max_bandwidth_khz;
+ /* Check if auto calculation requested */
+ if (!max_bandwidth_khz)
+ max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
+
+ if (max_bandwidth_khz < MHZ_TO_KHZ(40))
bw_flags = IEEE80211_CHAN_NO_HT40;
- if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(80))
+ if (max_bandwidth_khz < MHZ_TO_KHZ(80))
bw_flags |= IEEE80211_CHAN_NO_80MHZ;
- if (freq_range->max_bandwidth_khz < MHZ_TO_KHZ(160))
+ if (max_bandwidth_khz < MHZ_TO_KHZ(160))
bw_flags |= IEEE80211_CHAN_NO_160MHZ;
chan->flags |= map_regdom_flags(reg_rule->flags) | bw_flags;
@@ -1683,17 +1779,9 @@ static void reg_process_hint(struct regulatory_request *reg_request)
struct wiphy *wiphy = NULL;
enum reg_request_treatment treatment;
- if (WARN_ON(!reg_request->alpha2))
- return;
-
if (reg_request->wiphy_idx != WIPHY_IDX_INVALID)
wiphy = wiphy_idx_to_wiphy(reg_request->wiphy_idx);
- if (reg_request->initiator == NL80211_REGDOM_SET_BY_DRIVER && !wiphy) {
- kfree(reg_request);
- return;
- }
-
switch (reg_request->initiator) {
case NL80211_REGDOM_SET_BY_CORE:
reg_process_hint_core(reg_request);
@@ -1703,23 +1791,33 @@ static void reg_process_hint(struct regulatory_request *reg_request)
if (treatment == REG_REQ_IGNORE ||
treatment == REG_REQ_ALREADY_SET)
return;
- schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
+ queue_delayed_work(system_power_efficient_wq,
+ &reg_timeout, msecs_to_jiffies(3142));
return;
case NL80211_REGDOM_SET_BY_DRIVER:
+ if (!wiphy)
+ goto out_free;
treatment = reg_process_hint_driver(wiphy, reg_request);
break;
case NL80211_REGDOM_SET_BY_COUNTRY_IE:
+ if (!wiphy)
+ goto out_free;
treatment = reg_process_hint_country_ie(wiphy, reg_request);
break;
default:
WARN(1, "invalid initiator %d\n", reg_request->initiator);
- return;
+ goto out_free;
}
/* This is required so that the orig_* parameters are saved */
if (treatment == REG_REQ_ALREADY_SET && wiphy &&
wiphy->regulatory_flags & REGULATORY_STRICT_REG)
wiphy_update_regulatory(wiphy, reg_request->initiator);
+
+ return;
+
+out_free:
+ kfree(reg_request);
}
/*
@@ -2147,6 +2245,7 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
const struct ieee80211_reg_rule *reg_rule = NULL;
const struct ieee80211_freq_range *freq_range = NULL;
const struct ieee80211_power_rule *power_rule = NULL;
+ char bw[32];
pr_info(" (start_freq - end_freq @ bandwidth), (max_antenna_gain, max_eirp)\n");
@@ -2155,22 +2254,29 @@ static void print_rd_rules(const struct ieee80211_regdomain *rd)
freq_range = &reg_rule->freq_range;
power_rule = &reg_rule->power_rule;
+ if (!freq_range->max_bandwidth_khz)
+ snprintf(bw, 32, "%d KHz, AUTO",
+ reg_get_max_bandwidth(rd, reg_rule));
+ else
+ snprintf(bw, 32, "%d KHz",
+ freq_range->max_bandwidth_khz);
+
/*
* There may not be documentation for max antenna gain
* in certain regions
*/
if (power_rule->max_antenna_gain)
- pr_info(" (%d KHz - %d KHz @ %d KHz), (%d mBi, %d mBm)\n",
+ pr_info(" (%d KHz - %d KHz @ %s), (%d mBi, %d mBm)\n",
freq_range->start_freq_khz,
freq_range->end_freq_khz,
- freq_range->max_bandwidth_khz,
+ bw,
power_rule->max_antenna_gain,
power_rule->max_eirp);
else
- pr_info(" (%d KHz - %d KHz @ %d KHz), (N/A, %d mBm)\n",
+ pr_info(" (%d KHz - %d KHz @ %s), (N/A, %d mBm)\n",
freq_range->start_freq_khz,
freq_range->end_freq_khz,
- freq_range->max_bandwidth_khz,
+ bw,
power_rule->max_eirp);
}
}
@@ -2294,7 +2400,8 @@ static int reg_set_rd_driver(const struct ieee80211_regdomain *rd,
request_wiphy = wiphy_idx_to_wiphy(driver_request->wiphy_idx);
if (!request_wiphy) {
- schedule_delayed_work(&reg_timeout, 0);
+ queue_delayed_work(system_power_efficient_wq,
+ &reg_timeout, 0);
return -ENODEV;
}
@@ -2354,7 +2461,8 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd,
request_wiphy = wiphy_idx_to_wiphy(country_ie_request->wiphy_idx);
if (!request_wiphy) {
- schedule_delayed_work(&reg_timeout, 0);
+ queue_delayed_work(system_power_efficient_wq,
+ &reg_timeout, 0);
return -ENODEV;
}
diff --git a/net/wireless/reg.h b/net/wireless/reg.h
index 02bd8f4..1852461 100644
--- a/net/wireless/reg.h
+++ b/net/wireless/reg.h
@@ -34,6 +34,8 @@ int __init regulatory_init(void);
void regulatory_exit(void);
int set_regdom(const struct ieee80211_regdomain *rd);
+unsigned int reg_get_max_bandwidth(const struct ieee80211_regdomain *rd,
+ const struct ieee80211_reg_rule *rule);
bool reg_last_request_cell_base(void);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index fbcc23e..5eaeed5 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2278,11 +2278,6 @@ DECLARE_EVENT_CLASS(cfg80211_rx_evt,
TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT, NETDEV_PR_ARG, MAC_PR_ARG(addr))
);
-DEFINE_EVENT(cfg80211_rx_evt, cfg80211_ibss_joined,
- TP_PROTO(struct net_device *netdev, const u8 *addr),
- TP_ARGS(netdev, addr)
-);
-
DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_spurious_frame,
TP_PROTO(struct net_device *netdev, const u8 *addr),
TP_ARGS(netdev, addr)
@@ -2293,6 +2288,24 @@ DEFINE_EVENT(cfg80211_rx_evt, cfg80211_rx_unexpected_4addr_frame,
TP_ARGS(netdev, addr)
);
+TRACE_EVENT(cfg80211_ibss_joined,
+ TP_PROTO(struct net_device *netdev, const u8 *bssid,
+ struct ieee80211_channel *channel),
+ TP_ARGS(netdev, bssid, channel),
+ TP_STRUCT__entry(
+ NETDEV_ENTRY
+ MAC_ENTRY(bssid)
+ CHAN_ENTRY
+ ),
+ TP_fast_assign(
+ NETDEV_ASSIGN;
+ MAC_ASSIGN(bssid, bssid);
+ CHAN_ASSIGN(channel);
+ ),
+ TP_printk(NETDEV_PR_FMT ", bssid: " MAC_PR_FMT ", " CHAN_PR_FMT,
+ NETDEV_PR_ARG, MAC_PR_ARG(bssid), CHAN_PR_ARG)
+);
+
TRACE_EVENT(cfg80211_probe_status,
TP_PROTO(struct net_device *netdev, const u8 *addr, u64 cookie,
bool acked),
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d39c371..ad03af3 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -11,6 +11,7 @@
#include <net/ip.h>
#include <net/dsfield.h>
#include <linux/if_vlan.h>
+#include <linux/mpls.h>
#include "core.h"
#include "rdev-ops.h"
@@ -717,6 +718,21 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb,
case htons(ETH_P_IPV6):
dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & 0xfc;
break;
+ case htons(ETH_P_MPLS_UC):
+ case htons(ETH_P_MPLS_MC): {
+ struct mpls_label mpls_tmp, *mpls;
+
+ mpls = skb_header_pointer(skb, sizeof(struct ethhdr),
+ sizeof(*mpls), &mpls_tmp);
+ if (!mpls)
+ return 0;
+
+ return (ntohl(mpls->entry) & MPLS_LS_TC_MASK)
+ >> MPLS_LS_TC_SHIFT;
+ }
+ case htons(ETH_P_80221):
+ /* 802.21 is always network control traffic */
+ return 7;
default:
return 0;
}
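
The classify8021d hunk above maps MPLS frames to a priority by extracting the 3-bit Traffic Class field from the 32-bit label stack entry. A small standalone example of that bit extraction, using locally defined constants that mirror MPLS_LS_TC_MASK (0x00000E00) and MPLS_LS_TC_SHIFT (9) from linux/mpls.h:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>	/* htonl/ntohl */

#define LS_TC_MASK  0x00000E00u	/* mirrors MPLS_LS_TC_MASK */
#define LS_TC_SHIFT 9		/* mirrors MPLS_LS_TC_SHIFT */

/* entry_be is the 32-bit MPLS label stack entry as seen on the wire */
static unsigned mpls_tc(uint32_t entry_be)
{
	return (ntohl(entry_be) & LS_TC_MASK) >> LS_TC_SHIFT;
}

int main(void)
{
	/* label 16, TC 5, bottom-of-stack, TTL 64 */
	uint32_t entry = htonl((16u << 12) | (5u << LS_TC_SHIFT) |
			       (1u << 8) | 64u);

	printf("TC = %u\n", mpls_tc(entry));	/* prints 5 */
	return 0;
}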
@@ -820,7 +836,8 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev)
ev->dc.reason, true);
break;
case EVENT_IBSS_JOINED:
- __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid);
+ __cfg80211_ibss_joined(wdev->netdev, ev->ij.bssid,
+ ev->ij.channel);
break;
}
wdev_unlock(wdev);
@@ -1356,7 +1373,7 @@ int cfg80211_can_use_iftype_chan(struct cfg80211_registered_device *rdev,
*/
mutex_lock_nested(&wdev_iter->mtx, 1);
__acquire(wdev_iter->mtx);
- cfg80211_get_chan_state(wdev_iter, &ch, &chmode);
+ cfg80211_get_chan_state(wdev_iter, &ch, &chmode, &radar_detect);
wdev_unlock(wdev_iter);
switch (chmode) {
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 6c7ac01..4218164 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -108,7 +108,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
int err;
__be32 seq;
__be32 seq_hi;
- struct xfrm_state *x;
+ struct xfrm_state *x = NULL;
xfrm_address_t *daddr;
struct xfrm_mode *inner_mode;
unsigned int family;
@@ -120,9 +120,14 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
async = 1;
x = xfrm_input_state(skb);
seq = XFRM_SKB_CB(skb)->seq.input.low;
+ family = x->outer_mode->afinfo->family;
goto resume;
}
+ daddr = (xfrm_address_t *)(skb_network_header(skb) +
+ XFRM_SPI_SKB_CB(skb)->daddroff);
+ family = XFRM_SPI_SKB_CB(skb)->family;
+
/* Allocate new secpath or COW existing one. */
if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
struct sec_path *sp;
@@ -137,10 +142,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
skb->sp = sp;
}
- daddr = (xfrm_address_t *)(skb_network_header(skb) +
- XFRM_SPI_SKB_CB(skb)->daddroff);
- family = XFRM_SPI_SKB_CB(skb)->family;
-
seq = 0;
if (!spi && (err = xfrm_parse_spi(skb, nexthdr, &spi, &seq)) != 0) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMINHDRERROR);
@@ -162,6 +163,11 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
skb->sp->xvec[skb->sp->len++] = x;
+ if (xfrm_tunnel_check(skb, x, family)) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEMODEERROR);
+ goto drop;
+ }
+
spin_lock(&x->lock);
if (unlikely(x->km.state == XFRM_STATE_ACQ)) {
XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR);
@@ -201,7 +207,6 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
if (nexthdr == -EINPROGRESS)
return 0;
-
resume:
spin_lock(&x->lock);
if (nexthdr <= 0) {
@@ -263,6 +268,10 @@ resume:
}
} while (!err);
+ err = xfrm_rcv_cb(skb, family, x->type->proto, 0);
+ if (err)
+ goto drop;
+
nf_reset(skb);
if (decaps) {
@@ -276,6 +285,7 @@ resume:
drop_unlock:
spin_unlock(&x->lock);
drop:
+ xfrm_rcv_cb(skb, family, x && x->type ? x->type->proto : nexthdr, -1);
kfree_skb(skb);
return 0;
}
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 1d5c7bf..a75fae4 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -39,8 +39,6 @@
#define XFRM_QUEUE_TMO_MAX ((unsigned)(60*HZ))
#define XFRM_MAX_QUEUE_LEN 100
-static struct dst_entry *xfrm_policy_sk_bundles;
-
static DEFINE_SPINLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo __rcu *xfrm_policy_afinfo[NPROTO]
__read_mostly;
@@ -661,7 +659,7 @@ int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl)
hlist_add_head(&policy->bydst, chain);
xfrm_pol_hold(policy);
net->xfrm.policy_count[dir]++;
- atomic_inc(&flow_cache_genid);
+ atomic_inc(&net->xfrm.flow_cache_genid);
/* After previous checking, family can either be AF_INET or AF_INET6 */
if (policy->family == AF_INET)
@@ -2109,13 +2107,6 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
goto no_transform;
}
- dst_hold(&xdst->u.dst);
-
- spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
- xdst->u.dst.next = xfrm_policy_sk_bundles;
- xfrm_policy_sk_bundles = &xdst->u.dst;
- spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-
route = xdst->route;
}
}
@@ -2549,33 +2540,15 @@ static struct dst_entry *xfrm_negative_advice(struct dst_entry *dst)
return dst;
}
-static void __xfrm_garbage_collect(struct net *net)
-{
- struct dst_entry *head, *next;
-
- spin_lock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
- head = xfrm_policy_sk_bundles;
- xfrm_policy_sk_bundles = NULL;
- spin_unlock_bh(&net->xfrm.xfrm_policy_sk_bundle_lock);
-
- while (head) {
- next = head->next;
- dst_free(head);
- head = next;
- }
-}
-
void xfrm_garbage_collect(struct net *net)
{
- flow_cache_flush();
- __xfrm_garbage_collect(net);
+ flow_cache_flush(net);
}
EXPORT_SYMBOL(xfrm_garbage_collect);
static void xfrm_garbage_collect_deferred(struct net *net)
{
- flow_cache_flush_deferred();
- __xfrm_garbage_collect(net);
+ flow_cache_flush_deferred(net);
}
static void xfrm_init_pmtu(struct dst_entry *dst)
@@ -2944,9 +2917,9 @@ static int __net_init xfrm_net_init(struct net *net)
/* Initialize the per-net locks here */
spin_lock_init(&net->xfrm.xfrm_state_lock);
rwlock_init(&net->xfrm.xfrm_policy_lock);
- spin_lock_init(&net->xfrm.xfrm_policy_sk_bundle_lock);
mutex_init(&net->xfrm.xfrm_cfg_mutex);
+ flow_cache_init(net);
return 0;
out_sysctl:
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 40f1b3e..06970fe 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -161,6 +161,7 @@ static DEFINE_SPINLOCK(xfrm_state_gc_lock);
int __xfrm_state_delete(struct xfrm_state *x);
int km_query(struct xfrm_state *x, struct xfrm_tmpl *t, struct xfrm_policy *pol);
+bool km_is_alive(const struct km_event *c);
void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
static DEFINE_SPINLOCK(xfrm_type_lock);
@@ -788,6 +789,7 @@ xfrm_state_find(const xfrm_address_t *daddr, const xfrm_address_t *saddr,
struct xfrm_state *best = NULL;
u32 mark = pol->mark.v & pol->mark.m;
unsigned short encap_family = tmpl->encap_family;
+ struct km_event c;
to_put = NULL;
@@ -832,6 +834,17 @@ found:
error = -EEXIST;
goto out;
}
+
+ c.net = net;
+ /* If the KMs have no listeners (yet...), avoid allocating an SA
+ * for each and every packet - garbage collection might not
+ * handle the flood.
+ */
+ if (!km_is_alive(&c)) {
+ error = -ESRCH;
+ goto out;
+ }
+
x = xfrm_state_alloc(net);
if (x == NULL) {
error = -ENOMEM;
@@ -1135,10 +1148,9 @@ out:
EXPORT_SYMBOL(xfrm_state_add);
#ifdef CONFIG_XFRM_MIGRATE
-static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
+static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig)
{
struct net *net = xs_net(orig);
- int err = -ENOMEM;
struct xfrm_state *x = xfrm_state_alloc(net);
if (!x)
goto out;
@@ -1192,15 +1204,13 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
}
if (orig->replay_esn) {
- err = xfrm_replay_clone(x, orig);
- if (err)
+ if (xfrm_replay_clone(x, orig))
goto error;
}
memcpy(&x->mark, &orig->mark, sizeof(x->mark));
- err = xfrm_init_state(x);
- if (err)
+ if (xfrm_init_state(x) < 0)
goto error;
x->props.flags = orig->props.flags;
@@ -1218,8 +1228,6 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
error:
xfrm_state_put(x);
out:
- if (errp)
- *errp = err;
return NULL;
}
@@ -1274,9 +1282,8 @@ struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
struct xfrm_migrate *m)
{
struct xfrm_state *xc;
- int err;
- xc = xfrm_state_clone(x, &err);
+ xc = xfrm_state_clone(x);
if (!xc)
return NULL;
@@ -1289,7 +1296,7 @@ struct xfrm_state *xfrm_state_migrate(struct xfrm_state *x,
state is to be updated as it is a part of triplet */
xfrm_state_insert(xc);
} else {
- if ((err = xfrm_state_add(xc)) < 0)
+ if (xfrm_state_add(xc) < 0)
goto error;
}
@@ -1601,6 +1608,23 @@ unlock:
}
EXPORT_SYMBOL(xfrm_alloc_spi);
+static bool __xfrm_state_filter_match(struct xfrm_state *x,
+ struct xfrm_filter *filter)
+{
+ if (filter) {
+ if ((filter->family == AF_INET ||
+ filter->family == AF_INET6) &&
+ x->props.family != filter->family)
+ return false;
+
+ return addr_match(&x->props.saddr, &filter->saddr,
+ filter->splen) &&
+ addr_match(&x->id.daddr, &filter->daddr,
+ filter->dplen);
+ }
+ return true;
+}
+
int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
int (*func)(struct xfrm_state *, int, void*),
void *data)
@@ -1623,6 +1647,8 @@ int xfrm_state_walk(struct net *net, struct xfrm_state_walk *walk,
state = container_of(x, struct xfrm_state, km);
if (!xfrm_id_proto_match(state->id.proto, walk->proto))
continue;
+ if (!__xfrm_state_filter_match(state, walk->filter))
+ continue;
err = func(state, walk->seq, data);
if (err) {
list_move_tail(&walk->all, &x->all);
@@ -1641,17 +1667,21 @@ out:
}
EXPORT_SYMBOL(xfrm_state_walk);
-void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto)
+void xfrm_state_walk_init(struct xfrm_state_walk *walk, u8 proto,
+ struct xfrm_filter *filter)
{
INIT_LIST_HEAD(&walk->all);
walk->proto = proto;
walk->state = XFRM_STATE_DEAD;
walk->seq = 0;
+ walk->filter = filter;
}
EXPORT_SYMBOL(xfrm_state_walk_init);
void xfrm_state_walk_done(struct xfrm_state_walk *walk, struct net *net)
{
+ kfree(walk->filter);
+
if (list_empty(&walk->all))
return;
@@ -1804,6 +1834,24 @@ int km_report(struct net *net, u8 proto, struct xfrm_selector *sel, xfrm_address
}
EXPORT_SYMBOL(km_report);
+bool km_is_alive(const struct km_event *c)
+{
+ struct xfrm_mgr *km;
+ bool is_alive = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(km, &xfrm_km_list, list) {
+ if (km->is_alive && km->is_alive(c)) {
+ is_alive = true;
+ break;
+ }
+ }
+ rcu_read_unlock();
+
+ return is_alive;
+}
+EXPORT_SYMBOL(km_is_alive);
+
int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optval, int optlen)
{
int err;
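
The new __xfrm_state_filter_match() above accepts a state only when its family matches the filter and both the source and destination addresses agree with the filter addresses up to the given prefix lengths (splen/dplen). A sketch of that prefix comparison restricted to IPv4 and with a simplified v4_filter struct standing in for struct xfrm_filter (the family check is omitted here):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct v4_filter { uint32_t saddr, daddr; int splen, dplen; };	/* host order */

/* match addr against net/plen, roughly what addr_match() does for IPv4 */
static int prefix_match(uint32_t addr, uint32_t net, int plen)
{
	uint32_t mask = plen ? ~0u << (32 - plen) : 0;

	return (addr & mask) == (net & mask);
}

static int state_filter_match(uint32_t saddr, uint32_t daddr,
			      const struct v4_filter *f)
{
	if (!f)
		return 1;	/* no filter: everything matches */
	return prefix_match(saddr, f->saddr, f->splen) &&
	       prefix_match(daddr, f->daddr, f->dplen);
}

int main(void)
{
	struct v4_filter f = {
		.saddr = ntohl(inet_addr("10.0.0.0")),    .splen = 8,
		.daddr = ntohl(inet_addr("192.168.1.0")), .dplen = 24,
	};
	uint32_t s = ntohl(inet_addr("10.1.2.3"));
	uint32_t d = ntohl(inet_addr("192.168.1.77"));

	printf("match: %d\n", state_filter_match(s, d, &f));	/* prints 1 */
	return 0;
}

The xfrm_user.c hunk below wires this up by parsing an optional XFRMA_FILTER attribute from the dump request and handing it to xfrm_state_walk_init(), which now owns and later frees the filter.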
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c274179..195dbe2 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -137,7 +137,8 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
if (!rt)
return 0;
- if (p->id.proto != IPPROTO_ESP)
+ /* Only ESP and AH support the ESN feature. */
+ if ((p->id.proto != IPPROTO_ESP) && (p->id.proto != IPPROTO_AH))
return -EINVAL;
if (p->replay_window != 0)
@@ -881,6 +882,7 @@ static int xfrm_dump_sa_done(struct netlink_callback *cb)
return 0;
}
+static const struct nla_policy xfrma_policy[XFRMA_MAX+1];
static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
@@ -896,8 +898,31 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb)
info.nlmsg_flags = NLM_F_MULTI;
if (!cb->args[0]) {
+ struct nlattr *attrs[XFRMA_MAX+1];
+ struct xfrm_filter *filter = NULL;
+ u8 proto = 0;
+ int err;
+
cb->args[0] = 1;
- xfrm_state_walk_init(walk, 0);
+
+ err = nlmsg_parse(cb->nlh, 0, attrs, XFRMA_MAX,
+ xfrma_policy);
+ if (err < 0)
+ return err;
+
+ if (attrs[XFRMA_FILTER]) {
+ filter = kmalloc(sizeof(*filter), GFP_KERNEL);
+ if (filter == NULL)
+ return -ENOMEM;
+
+ memcpy(filter, nla_data(attrs[XFRMA_FILTER]),
+ sizeof(*filter));
+ }
+
+ if (attrs[XFRMA_PROTO])
+ proto = nla_get_u8(attrs[XFRMA_PROTO]);
+
+ xfrm_state_walk_init(walk, proto, filter);
}
(void) xfrm_state_walk(net, walk, dump_one_state, &info);
@@ -2303,6 +2328,8 @@ static const struct nla_policy xfrma_policy[XFRMA_MAX+1] = {
[XFRMA_TFCPAD] = { .type = NLA_U32 },
[XFRMA_REPLAY_ESN_VAL] = { .len = sizeof(struct xfrm_replay_state_esn) },
[XFRMA_SA_EXTRA_FLAGS] = { .type = NLA_U32 },
+ [XFRMA_PROTO] = { .type = NLA_U8 },
+ [XFRMA_FILTER] = { .len = sizeof(struct xfrm_filter) },
};
static const struct xfrm_link {
@@ -2976,6 +3003,11 @@ static int xfrm_send_mapping(struct xfrm_state *x, xfrm_address_t *ipaddr,
return nlmsg_multicast(net->xfrm.nlsk, skb, 0, XFRMNLGRP_MAPPING, GFP_ATOMIC);
}
+static bool xfrm_is_alive(const struct km_event *c)
+{
+ return (bool)xfrm_acquire_is_on(c->net);
+}
+
static struct xfrm_mgr netlink_mgr = {
.id = "netlink",
.notify = xfrm_send_state_notify,
@@ -2985,6 +3017,7 @@ static struct xfrm_mgr netlink_mgr = {
.report = xfrm_send_report,
.migrate = xfrm_send_migrate,
.new_mapping = xfrm_send_mapping,
+ .is_alive = xfrm_is_alive,
};
static int __net_init xfrm_user_net_init(struct net *net)
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 48c3cc9..dfe3fda 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -45,10 +45,11 @@ static inline void selinux_xfrm_notify_policyload(void)
{
struct net *net;
- atomic_inc(&flow_cache_genid);
rtnl_lock();
- for_each_net(net)
+ for_each_net(net) {
+ atomic_inc(&net->xfrm.flow_cache_genid);
rt_genid_bump_all(net);
+ }
rtnl_unlock();
}
#else
diff --git a/tools/net/bpf_dbg.c b/tools/net/bpf_dbg.c
index 65dc757..bb31813 100644
--- a/tools/net/bpf_dbg.c
+++ b/tools/net/bpf_dbg.c
@@ -87,9 +87,6 @@
__attribute__ ((format (printf, (pos_fmtstr), (pos_fmtargs))))
#endif
-#define CMD(_name, _func) { .name = _name, .func = _func, }
-#define OP(_op, _name) [_op] = _name
-
enum {
CMD_OK,
CMD_ERR,
@@ -145,32 +142,32 @@ static size_t pcap_map_size = 0;
static char *pcap_ptr_va_start, *pcap_ptr_va_curr;
static const char * const op_table[] = {
- OP(BPF_ST, "st"),
- OP(BPF_STX, "stx"),
- OP(BPF_LD_B, "ldb"),
- OP(BPF_LD_H, "ldh"),
- OP(BPF_LD_W, "ld"),
- OP(BPF_LDX, "ldx"),
- OP(BPF_LDX_B, "ldxb"),
- OP(BPF_JMP_JA, "ja"),
- OP(BPF_JMP_JEQ, "jeq"),
- OP(BPF_JMP_JGT, "jgt"),
- OP(BPF_JMP_JGE, "jge"),
- OP(BPF_JMP_JSET, "jset"),
- OP(BPF_ALU_ADD, "add"),
- OP(BPF_ALU_SUB, "sub"),
- OP(BPF_ALU_MUL, "mul"),
- OP(BPF_ALU_DIV, "div"),
- OP(BPF_ALU_MOD, "mod"),
- OP(BPF_ALU_NEG, "neg"),
- OP(BPF_ALU_AND, "and"),
- OP(BPF_ALU_OR, "or"),
- OP(BPF_ALU_XOR, "xor"),
- OP(BPF_ALU_LSH, "lsh"),
- OP(BPF_ALU_RSH, "rsh"),
- OP(BPF_MISC_TAX, "tax"),
- OP(BPF_MISC_TXA, "txa"),
- OP(BPF_RET, "ret"),
+ [BPF_ST] = "st",
+ [BPF_STX] = "stx",
+ [BPF_LD_B] = "ldb",
+ [BPF_LD_H] = "ldh",
+ [BPF_LD_W] = "ld",
+ [BPF_LDX] = "ldx",
+ [BPF_LDX_B] = "ldxb",
+ [BPF_JMP_JA] = "ja",
+ [BPF_JMP_JEQ] = "jeq",
+ [BPF_JMP_JGT] = "jgt",
+ [BPF_JMP_JGE] = "jge",
+ [BPF_JMP_JSET] = "jset",
+ [BPF_ALU_ADD] = "add",
+ [BPF_ALU_SUB] = "sub",
+ [BPF_ALU_MUL] = "mul",
+ [BPF_ALU_DIV] = "div",
+ [BPF_ALU_MOD] = "mod",
+ [BPF_ALU_NEG] = "neg",
+ [BPF_ALU_AND] = "and",
+ [BPF_ALU_OR] = "or",
+ [BPF_ALU_XOR] = "xor",
+ [BPF_ALU_LSH] = "lsh",
+ [BPF_ALU_RSH] = "rsh",
+ [BPF_MISC_TAX] = "tax",
+ [BPF_MISC_TXA] = "txa",
+ [BPF_RET] = "ret",
};
static __check_format_printf(1, 2) int rl_printf(const char *fmt, ...)
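
The bpf_dbg changes drop the local OP()/CMD() wrapper macros in favour of plain C99 designated initializers, which keep the opcode-to-mnemonic table sparse and order-independent without hiding the syntax behind a macro. A minimal illustration with made-up opcode values (not the real BPF opcode encodings):

#include <stdio.h>

enum { OP_LD = 0x00, OP_RET = 0x06, OP_TAX = 0x10 };	/* made-up values */

static const char * const op_table[] = {
	[OP_LD]  = "ld",
	[OP_RET] = "ret",
	[OP_TAX] = "tax",	/* unlisted indices are implicitly NULL */
};

int main(void)
{
	int op = OP_RET;

	printf("op %d -> %s\n", op, op_table[op] ? op_table[op] : "unknown");
	return 0;
}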
@@ -1127,7 +1124,6 @@ static int cmd_step(char *num)
static int cmd_select(char *num)
{
unsigned int which, i;
- struct pcap_pkthdr *hdr;
bool have_next = true;
if (!pcap_loaded() || strlen(num) == 0)
@@ -1144,7 +1140,7 @@ static int cmd_select(char *num)
for (i = 0; i < which && (have_next = pcap_next_pkt()); i++)
/* noop */;
- if (!have_next || (hdr = pcap_curr_pkt()) == NULL) {
+ if (!have_next || pcap_curr_pkt() == NULL) {
rl_printf("no packet #%u available!\n", which);
pcap_reset_pkt();
return CMD_ERR;
@@ -1177,9 +1173,8 @@ static int cmd_breakpoint(char *subcmd)
static int cmd_run(char *num)
{
static uint32_t pass = 0, fail = 0;
- struct pcap_pkthdr *hdr;
bool has_limit = true;
- int ret, pkts = 0, i = 0;
+ int pkts = 0, i = 0;
if (!bpf_prog_loaded() || !pcap_loaded())
return CMD_ERR;
@@ -1189,10 +1184,10 @@ static int cmd_run(char *num)
has_limit = false;
do {
- hdr = pcap_curr_pkt();
- ret = bpf_run_all(bpf_image, bpf_prog_len,
- (uint8_t *) hdr + sizeof(*hdr),
- hdr->caplen, hdr->len);
+ struct pcap_pkthdr *hdr = pcap_curr_pkt();
+ int ret = bpf_run_all(bpf_image, bpf_prog_len,
+ (uint8_t *) hdr + sizeof(*hdr),
+ hdr->caplen, hdr->len);
if (ret > 0)
pass++;
else if (ret == 0)
@@ -1245,14 +1240,14 @@ static int cmd_quit(char *dontcare)
}
static const struct shell_cmd cmds[] = {
- CMD("load", cmd_load),
- CMD("select", cmd_select),
- CMD("step", cmd_step),
- CMD("run", cmd_run),
- CMD("breakpoint", cmd_breakpoint),
- CMD("disassemble", cmd_disassemble),
- CMD("dump", cmd_dump),
- CMD("quit", cmd_quit),
+ { .name = "load", .func = cmd_load },
+ { .name = "select", .func = cmd_select },
+ { .name = "step", .func = cmd_step },
+ { .name = "run", .func = cmd_run },
+ { .name = "breakpoint", .func = cmd_breakpoint },
+ { .name = "disassemble", .func = cmd_disassemble },
+ { .name = "dump", .func = cmd_dump },
+ { .name = "quit", .func = cmd_quit },
};
static int execf(char *arg)
@@ -1280,7 +1275,6 @@ out:
static char *shell_comp_gen(const char *buf, int state)
{
static int list_index, len;
- const char *name;
if (!state) {
list_index = 0;
@@ -1288,9 +1282,9 @@ static char *shell_comp_gen(const char *buf, int state)
}
for (; list_index < array_size(cmds); ) {
- name = cmds[list_index].name;
- list_index++;
+ const char *name = cmds[list_index].name;
+ list_index++;
if (strncmp(name, buf, len) == 0)
return strdup(name);
}
@@ -1322,16 +1316,9 @@ static void init_shell(FILE *fin, FILE *fout)
{
char file[128];
- memset(file, 0, sizeof(file));
- snprintf(file, sizeof(file) - 1,
- "%s/.bpf_dbg_history", getenv("HOME"));
-
+ snprintf(file, sizeof(file), "%s/.bpf_dbg_history", getenv("HOME"));
read_history(file);
- memset(file, 0, sizeof(file));
- snprintf(file, sizeof(file) - 1,
- "%s/.bpf_dbg_init", getenv("HOME"));
-
rl_instream = fin;
rl_outstream = fout;
@@ -1348,37 +1335,41 @@ static void init_shell(FILE *fin, FILE *fout)
rl_bind_key_in_map('\t', rl_complete, emacs_meta_keymap);
rl_bind_key_in_map('\033', rl_complete, emacs_meta_keymap);
+ snprintf(file, sizeof(file), "%s/.bpf_dbg_init", getenv("HOME"));
rl_read_init_file(file);
+
rl_prep_terminal(0);
rl_set_signals();
signal(SIGINT, intr_shell);
}
-static void exit_shell(void)
+static void exit_shell(FILE *fin, FILE *fout)
{
char file[128];
- memset(file, 0, sizeof(file));
- snprintf(file, sizeof(file) - 1,
- "%s/.bpf_dbg_history", getenv("HOME"));
-
+ snprintf(file, sizeof(file), "%s/.bpf_dbg_history", getenv("HOME"));
write_history(file);
+
clear_history();
rl_deprep_terminal();
try_close_pcap();
+
+ if (fin != stdin)
+ fclose(fin);
+ if (fout != stdout)
+ fclose(fout);
}
static int run_shell_loop(FILE *fin, FILE *fout)
{
char *buf;
- int ret;
init_shell(fin, fout);
while ((buf = readline("> ")) != NULL) {
- ret = execf(buf);
+ int ret = execf(buf);
if (ret == CMD_EX)
break;
if (ret == CMD_OK && strlen(buf) > 0)
@@ -1387,7 +1378,7 @@ static int run_shell_loop(FILE *fin, FILE *fout)
free(buf);
}
- exit_shell();
+ exit_shell(fin, fout);
return 0;
}